/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */
/* NetworkManager -- Network link manager
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Copyright (C) 2007 - 2009 Novell, Inc.
 * Copyright (C) 2007 - 2017 Red Hat, Inc.
 */

#include "nm-default.h"

#include "nm-manager.h"

#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

#include "nm-utils/nm-c-list.h"

#include "nm-common-macros.h"
#include "nm-dbus-manager.h"
#include "vpn/nm-vpn-manager.h"
#include "devices/nm-device.h"
#include "devices/nm-device-generic.h"
#include "platform/nm-platform.h"
#include "platform/nmp-object.h"
#include "nm-hostname-manager.h"
#include "nm-rfkill-manager.h"
#include "dhcp/nm-dhcp-manager.h"
#include "settings/nm-settings.h"
#include "settings/nm-settings-connection.h"
#include "nm-auth-utils.h"
#include "nm-auth-manager.h"
#include "NetworkManagerUtils.h"
#include "devices/nm-device-factory.h"
#include "nm-sleep-monitor.h"
#include "nm-connectivity.h"
#include "nm-policy.h"
#include "nm-session-monitor.h"
#include "nm-act-request.h"
#include "nm-core-internal.h"
#include "nm-config.h"
#include "nm-audit-manager.h"
#include "nm-dbus-compat.h"
#include "nm-checkpoint.h"
#include "nm-checkpoint-manager.h"

core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API

Previously, we used the generated GDBusInterfaceSkeleton types and glued them via the NMExportedObject base class to our NM types. We also used GDBusObjectManagerServer.

Don't do that anymore. The resulting code was more complicated despite (or because of?) using generated classes. It was hard to understand, complex, had ordering issues, and had a runtime and memory overhead.

This patch refactors this entirely and uses the lower layer API GDBusConnection directly. It replaces the generated code, GDBusInterfaceSkeleton, and GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager together with static descriptor instances of type GDBusInterfaceInfo.

This adds a net plus of more than 1300 lines of hand-written code. I claim that this implementation is easier to understand. Note that previously we also required extensive and complex glue code to bind our objects to the generated skeleton objects. Instead, we now glue our objects directly to GDBusConnection. The result is more immediate and gets rid of layers of code in between.

Now that the D-Bus glue is more under our control, we can address issues and bottlenecks better, instead of adding code to bend the generated skeletons to our needs.

Note that the current implementation only supports one D-Bus connection. That was effectively the case already, although there were places (and still are) where the code pretends it could also support connections from a private socket. We dropped private socket support mainly because it was unused, untested and buggy, but also because GDBusObjectManagerServer could not export the same objects on multiple connections. It would now be rather straightforward to fix that and re-introduce ObjectManager on each private connection, but this commit doesn't do that yet, and the new code intentionally supports only one D-Bus connection.

Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start() succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough for the moment. It could easily be extended later, for example with polling whether the system bus appears (like was done previously). Restart of the D-Bus daemon isn't supported either -- just like before.

Note how NMDBusManager now implements the ObjectManager D-Bus interface directly.

Also, this fixes race issues in the server by no longer delaying PropertiesChanged signals. NMExportedObject would collect changed properties and send the signal out in idle_emit_properties_changed() on idle. This messes up the ordering of change events w.r.t. other signals and events on the bus. Note that not only NMExportedObject messed up the ordering; the generated code would also hook into notify() and process change events in an idle handler, exhibiting the same ordering issue.

No longer do that. PropertiesChanged signals are now sent right away by hooking into dispatch_properties_changed(). This means that changes to a property in quick succession are no longer combined, and a signal is emitted for each individual state. Quite possibly we now emit more PropertiesChanged signals than before. However, we are now able to group a set of changes by using standard g_object_freeze_notify()/g_object_thaw_notify(). We probably should make more use of that.

Also, now that our signals are all handled in the right order, we might find places where we still emit them in the wrong order. But that is then due to the order in which our GObjects emit signals, not due to ill behavior of the D-Bus glue. Possibly we need to identify such ordering issues and fix them.

Numbers (for contrib/rpm --without debug on x86_64):

- The patch changes the code size of NetworkManager:
  - 2809360 bytes
  + 2537528 bytes (-9.7%)

- Runtime measurements are harder because there is a large variance during testing; in other words, the numbers are not reproducible. Currently, the implementation performs no caching of GVariants at all, but it would be rather simple to add if that turns out to be useful. Anyway, without a strong claim, it seems that the new form tends to perform slightly better. That would be no surprise.

  $ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
  - real 1m39.355s
  + real 1m37.432s

  $ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
  - real 0m26.843s
  + real 0m25.281s

- Regarding RSS size, just looking at the processes in similar conditions doesn't show a large difference. On my system they consume about 19MB RSS. It seems that the new version has a slightly smaller RSS size.
  - 19356 RSS
  + 18660 RSS
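As a minimal, self-contained sketch of the pattern this rework moves to (not NetworkManager code; the "example_*" names and the "org.example.Manager" interface are made up for illustration): a static GDBusInterfaceInfo descriptor plus a method-call handler registered directly on a GDBusConnection, instead of a generated GDBusInterfaceSkeleton.

/* Sketch only: static D-Bus interface descriptor + direct registration. */
#include <gio/gio.h>

static GDBusArgInfo arg_version = { -1, "version", "s", NULL };
static GDBusArgInfo *get_version_out_args[] = { &arg_version, NULL };

static GDBusMethodInfo method_get_version = {
    -1, "GetVersion", NULL, get_version_out_args, NULL
};
static GDBusMethodInfo *example_methods[] = { &method_get_version, NULL };

static GDBusInterfaceInfo example_interface_info = {
    -1, "org.example.Manager", example_methods, NULL, NULL, NULL
};

static void
example_method_call (GDBusConnection *connection,
                     const char *sender,
                     const char *object_path,
                     const char *interface_name,
                     const char *method_name,
                     GVariant *parameters,
                     GDBusMethodInvocation *invocation,
                     gpointer user_data)
{
    /* dispatch by method name; the descriptor above declares what exists */
    if (g_strcmp0 (method_name, "GetVersion") == 0) {
        g_dbus_method_invocation_return_value (invocation,
                                               g_variant_new ("(s)", "1.0"));
        return;
    }
    g_dbus_method_invocation_return_error (invocation,
                                           G_DBUS_ERROR,
                                           G_DBUS_ERROR_UNKNOWN_METHOD,
                                           "no such method");
}

static const GDBusInterfaceVTable example_vtable = {
    .method_call = example_method_call,
};

static guint
example_export (GDBusConnection *connection)
{
    /* register the object directly on the connection; this replaces what
     * gdbus-codegen would otherwise generate */
    return g_dbus_connection_register_object (connection,
                                              "/org/example/Manager",
                                              &example_interface_info,
                                              &example_vtable,
                                              NULL, NULL, NULL);
}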

#include "nm-dbus-object.h"
#include "nm-dispatcher.h"

/*****************************************************************************/
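/* State of one rfkill radio type (e.g. Wi-Fi, WWAN): the user's preference plus
 * the software and hardware kill-switch states, together with the persistent
 * state key and the property names under which the state is exposed. */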
typedef struct {
    gboolean user_enabled;
    gboolean sw_enabled;
    gboolean hw_enabled;
    RfKillType rtype;
    NMConfigRunStatePropertyType key;
    const char *desc;
    const char *prop;
    const char *hw_prop;
} RadioState;

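/* AsyncOpType/AsyncOpData describe one pending asynchronous operation (an
 * authorize-and-activate request). Each AsyncOpData is linked into the
 * manager's async_op_lst_head list while the operation is in flight. */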
typedef enum {
    ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL,
    ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_USER,
    ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE,
} AsyncOpType;

typedef struct {
    CList async_op_lst;
    NMManager *self;
    AsyncOpType async_op_type;
    union {
        struct {
            NMActiveConnection *active;
            union {
                struct {
                    GDBusMethodInvocation *invocation;
                } activate_user;
                struct {
                    GDBusMethodInvocation *invocation;
                    NMConnection *connection;
                    NMSettingsConnectionPersistMode persist;
                } add_and_activate;
            };
        } ac_auth;
    };
} AsyncOpData;

enum {
    DEVICE_ADDED,
    INTERNAL_DEVICE_ADDED,
    DEVICE_REMOVED,
    INTERNAL_DEVICE_REMOVED,
    ACTIVE_CONNECTION_ADDED,
    ACTIVE_CONNECTION_REMOVED,
    CONFIGURE_QUIT,

    LAST_SIGNAL
};

static guint signals[LAST_SIGNAL] = { 0 };

NM_GOBJECT_PROPERTIES_DEFINE (NMManager,
    PROP_VERSION,
    PROP_CAPABILITIES,
    PROP_STATE,
    PROP_STARTUP,
    PROP_NETWORKING_ENABLED,
    PROP_WIRELESS_ENABLED,
    PROP_WIRELESS_HARDWARE_ENABLED,
    PROP_WWAN_ENABLED,
    PROP_WWAN_HARDWARE_ENABLED,
    PROP_WIMAX_ENABLED,
    PROP_WIMAX_HARDWARE_ENABLED,
    PROP_ACTIVE_CONNECTIONS,
    PROP_CONNECTIVITY,
    PROP_CONNECTIVITY_CHECK_AVAILABLE,
    PROP_CONNECTIVITY_CHECK_ENABLED,
    PROP_PRIMARY_CONNECTION,
    PROP_PRIMARY_CONNECTION_TYPE,
    PROP_ACTIVATING_CONNECTION,
    PROP_DEVICES,
    PROP_METERED,
    PROP_GLOBAL_DNS_CONFIGURATION,
    PROP_ALL_DEVICES,
    PROP_CHECKPOINTS,

    /* Not exported */
    PROP_SLEEPING,
);

typedef struct {
    NMPlatform *platform;

    GArray *capabilities;

    CList active_connections_lst_head;
    CList async_op_lst_head;
    guint ac_cleanup_id;
    NMActiveConnection *primary_connection;
    NMActiveConnection *activating_connection;
    NMMetered metered;

core: track devices in manager via embedded CList

Instead of using a GSList for tracking the devices, use a CList. I think a CList is in most cases the more suitable data structure than GSList:

- you can find out in O(1) whether the object is linked. That is nice, for example, to assert in NMDevice's destructor that the object was unlinked, and we will use that later in nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the element without having access to the list's head.
- contrary to GSList, this does not require an extra slice allocation for the link node. It quite possibly consumes slightly less memory because the CList structure is embedded in a struct that we already allocate. Even if slice allocation were perfect and consumed only 2*sizeof(gpointer) per link node, it would at most be as good as CList; quite possibly there is an overhead.
- CList possibly has better memory locality, because the link structure and the data are close to each other.

Something that could be seen as a disadvantage is that with CList one device can only be tracked in one NMManager instance at a time. But that is fine: there exists only one NMManager instance for now, and even if we ever introduced multiple managers, we probably would not associate one NMDevice instance with multiple managers.

The advantages are arguably not huge, but CList is IMHO clearly the more suited data structure for the job; no need to stick to a suboptimal one. Refactor it. (A short usage sketch of an embedded CList follows the NMManagerPrivate struct below.)
    CList devices_lst_head;

    NMState state;
    NMConfig *config;
connectivity: schedule connectivity timers per-device and probe for short outages

It can happen that connectivity is lost only for a moment and returns soon after. Based on that assumption, when we lose connectivity we want a probe interval during which we check for returning connectivity more frequently. For that, we track the timeouts per-device.

The interval starts at 1 second and doubles until the full interval is reached. Actually, due to the implementation, it's unlikely that we already perform the second check 1 second later: commonly the first check returns before the one-second timeout is reached and bumps the interval to 2 seconds right away.

Also, we go to extra lengths so that manual connectivity checks delay the periodic checks. By being smarter about that, we can reduce the number of connectivity checks while still keeping the promise to check at least within the requested interval.

The complexity of bookkeeping the timeouts is remarkable, but I think it is worth the effort, and we should try hard to

- have a connectivity state that is as accurate as possible. Connectivity checking means that we are probing, so being more intelligent about timeout and backoff timers can result in a better connectivity state. The connectivity state is important because we use it for the default-route penalty and the GUI indicates bad connectivity.
- be intelligent about avoiding redundant connectivity checks. While we want to check often to get an accurate connectivity state, we also want to minimize the number of HTTP requests in case connectivity is established and supposedly stable.

Also, perform connectivity checks in every state of the device. Even if a device is disconnected, it still might have connectivity, for example if the user externally adds an IP address on an unmanaged device. (A minimal sketch of the interval doubling follows the NMManagerPrivate struct below.)

https://bugzilla.gnome.org/show_bug.cgi?id=792240
    NMConnectivity *concheck_mgr;
    NMPolicy *policy;
    NMHostnameManager *hostname_manager;

    struct {
        GDBusConnection *connection;
        guint id;
    } prop_filter;
    NMRfkillManager *rfkill_mgr;

    CList link_cb_lst;

    NMCheckpointManager *checkpoint_mgr;

    NMSettings *settings;

    RadioState radio_states[RFKILL_TYPE_MAX];
libnm, core, cli, tui: fix the capitalization of various types

GLib/Gtk have mostly settled on the convention that two-letter acronyms in type names remain all-caps (e.g. "IO"), but longer acronyms become initial-caps-only (e.g. "Tcp"). NM was inconsistent, with most long acronyms using initial caps only (Adsl, Cdma, Dcb, Gsm, Olpc, Vlan), but others using all caps (DHCP, PPP, PPPOE, VPN). Fix libnm and src/ to use initial caps only for all acronyms of three or more letters (and update nmcli and nmtui for the libnm changes).
    NMVpnManager *vpn_manager;

    NMSleepMonitor *sleep_monitor;

    NMAuthManager *auth_mgr;

device: generate unique default route-metrics per interface

In the past we had NMDefaultRouteManager, which would coordinate adding default-routes with identical metrics. That especially happened when activating two devices of the same type without explicitly specifying ipv4.route-metric. For example, with ethernet devices, the routes on both interfaces would get a metric of 100.

Coordinating routes was especially necessary because we added routes with the NLM_F_EXCL flag, akin to `ip route replace`. We not only had to avoid that activating two devices in NetworkManager would result in a fight over the default-route, but more importantly to preserve externally added default-routes on unmanaged interfaces.

NMDefaultRouteManager would ensure that, in case of duplicate metrics, the device that activated first kept the best default-route. It would do so by bumping the metric of the second device to find an unused metric. The bumping itself was not very important -- NMDefaultRouteManager could also just not configure any default-routes that show up second; the result would be quite similar. More important was to keep the best default-route on the first activating device until that device deactivates or a device activates that really has a better default-route.

Likewise, NMRouteManager would globally manage non-default-routes. It would not do any bumping of metrics, but it would also ensure that the routes of the device that activates first are not overwritten by a device activating later.

However, the `ip route replace` approach has downsides, especially that it messes with routes on other interfaces, interfaces that are possibly not managed by NetworkManager. Another downside is that binding a socket to an interface might not result in correct routes, because the route might just not be there (in the case of NMRouteManager, which wouldn't configure duplicate routes by bumping their metric).

Since commit 77ec302714795f905301d500b9aab6c88001f32e we no longer use NLM_F_EXCL, but add routes akin to `ip route append`. When activating, for example, two ethernet devices with no explicit route-metric configuration, there are two routes like

  default via 10.16.122.254 dev eth0 proto dhcp metric 100
  default via 192.168.100.1 dev eth1 proto dhcp metric 100

This does not only affect default routes. In case of a multi-homing setup you'd get

  192.168.100.0/24 dev eth0 proto kernel scope link src 192.168.100.1 metric 100
  192.168.100.0/24 dev eth1 proto kernel scope link src 192.168.100.1 metric 100

but it's most visible for default-routes.

Note that we would append the routes that are activated later, as the order of `ip route show` confirms. One might hence expect that the kernel selects a route based on the order in the routing tables. However, that isn't the case: activating the second interface will non-deterministically re-route traffic via the new interface. That will interfere badly with NAT, stateful firewalls, and existing connections (like TCP).

The solution is to have NMManager keep a global index of the default route-metrics currently in use. So, instead of determining the default-route metric based solely on the device type, we now in addition generate default metrics that do not overlap. For example, if you activate eth0 first, it gets route-metric 100, and if you then activate eth1, it gets 101. Note that if you deactivate and re-activate eth0, it will get route-metric 102, because the best route should stick on eth1 (which reserves the range 100 to 101).

Note that when a connection explicitly selects a particular metric, that choice is honored (contrary to NMDefaultRouteManager, which was more concerned with avoiding conflicts than with keeping the exact metric). (A minimal sketch of such a lowest-unused-metric reservation follows the NMManagerPrivate struct below.)

https://bugzilla.redhat.com/show_bug.cgi?id=1505893
    GHashTable *device_route_metrics;

    GSList *auth_chains;
    GHashTable *sleep_devices;

    /* Firmware dir monitor */
    GFileMonitor *fw_monitor;
    guint fw_changed_id;

    guint timestamp_update_id;

    guint devices_inited_id;

    NMConnectivityState connectivity_state;

    bool startup:1;
    bool devices_inited:1;

    bool sleeping:1;
    bool net_enabled:1;

    unsigned connectivity_check_enabled_last:2;

    guint delete_volatile_connection_idle_id;
    CList delete_volatile_connection_lst_head;
} NMManagerPrivate;
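As a short illustration of the embedded-CList pattern used for devices_lst_head (and the other CList members above), here is a minimal sketch. It is not NetworkManager code; the "Example*" names are made up, and it only relies on the plain c-list helpers pulled in via "nm-utils/nm-c-list.h".

/* Sketch only: the link node lives inside the tracked object itself. */
#include "nm-utils/nm-c-list.h"

typedef struct {
    CList devices_lst;   /* embedded link node, no extra allocation needed */
    int ifindex;
} ExampleDevice;

static void
example_clist_usage (void)
{
    CList devices_lst_head = C_LIST_INIT (devices_lst_head);
    ExampleDevice a = { .ifindex = 1 };
    ExampleDevice b = { .ifindex = 2 };
    ExampleDevice *dev;

    c_list_link_tail (&devices_lst_head, &a.devices_lst);
    c_list_link_tail (&devices_lst_head, &b.devices_lst);

    c_list_for_each_entry (dev, &devices_lst_head, devices_lst) {
        /* iterates in insertion order: ifindex 1, then 2 */
    }

    /* O(1) unlink without access to the list head, and an O(1) linked-check */
    c_list_unlink (&a.devices_lst);
    if (!c_list_is_linked (&a.devices_lst)) {
        /* the node now reports as unlinked */
    }
}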
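The device_route_metrics hash table above is the global index of reserved default-route metrics described in the route-metric note. A minimal sketch of the reservation idea (not the actual NMManager implementation; names are illustrative, and the table is assumed to use g_direct_hash()/g_direct_equal()):

/* Sketch only: pick the lowest metric >= the device-type default that is not
 * already reserved, then record it as used. */
static guint32
example_reserve_route_metric (GHashTable *used_metrics /* metric -> owner */,
                              guint32 type_default,
                              gpointer owner)
{
    guint32 metric = type_default;

    while (g_hash_table_contains (used_metrics, GUINT_TO_POINTER (metric)))
        metric++;
    g_hash_table_insert (used_metrics, GUINT_TO_POINTER (metric), owner);
    return metric;
}

With a type default of 100, activating eth0 then eth1 yields 100 and 101. The sketch only shows the lowest-unused lookup; in the behavior described above, the range up to the currently best device additionally stays reserved, which is why re-activating eth0 in the commit's example gets 102 rather than 100.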
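And a minimal sketch of the probe-interval doubling described in the connectivity note above (illustrative only, not the NMConnectivity/NMDevice code):

/* After connectivity is lost, probe again quickly and double the delay each
 * time until the configured full interval is reached again. */
static guint
example_next_probe_interval (guint cur_interval_sec, guint full_interval_sec)
{
    if (cur_interval_sec == 0)
        return 1;                         /* first probe after 1 second */
    if (cur_interval_sec > full_interval_sec / 2)
        return full_interval_sec;         /* clamp at the full interval */
    return cur_interval_sec * 2;          /* otherwise double the delay */
}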
struct _NMManager {
    NMDBusObject parent;
    NMManagerPrivate _priv;
};

typedef struct {
    NMDBusObjectClass parent;
} NMManagerClass;

G_DEFINE_TYPE (NMManager, nm_manager, NM_TYPE_DBUS_OBJECT)

#define NM_MANAGER_GET_PRIVATE(self) _NM_GET_PRIVATE(self, NMManager, NM_IS_MANAGER)

/*****************************************************************************/

NM_DEFINE_SINGLETON_INSTANCE (NMManager);

/*****************************************************************************/

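/* Logging helpers for this file: _NMLOG() prefixes messages with "manager" (and
 * the instance pointer when logging for an instance other than the singleton),
 * _NMLOG2() additionally prefixes the device's interface name, and _NMLOG3()
 * prefixes the connection's id and logs against its UUID. */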
#define _NMLOG_PREFIX_NAME "manager"
#define _NMLOG(level, domain, ...) \
    G_STMT_START { \
        const NMLogLevel _level = (level); \
        const NMLogDomain _domain = (domain); \
        \
        if (nm_logging_enabled (_level, _domain)) { \
            const NMManager *const _self = (self); \
            char _sbuf[32]; \
            \
            _nm_log (_level, _domain, 0, NULL, NULL, \
                     "%s%s: " _NM_UTILS_MACRO_FIRST (__VA_ARGS__), \
                     _NMLOG_PREFIX_NAME, \
                     ((_self && _self != singleton_instance) \
                         ? nm_sprintf_buf (_sbuf, "[%p]", _self) \
                         : "") \
                     _NM_UTILS_MACRO_REST (__VA_ARGS__)); \
        } \
    } G_STMT_END

#define _NMLOG2(level, domain, device, ...) \
    G_STMT_START { \
        const NMLogLevel _level = (level); \
        const NMLogDomain _domain = (domain); \
        \
        if (nm_logging_enabled (_level, _domain)) { \
            const NMManager *const _self = (self); \
            const char *const _ifname = _nm_device_get_iface (device); \
            char _sbuf[32]; \
            \
            _nm_log (_level, _domain, 0, \
                     _ifname, NULL, \
                     "%s%s: %s%s%s" _NM_UTILS_MACRO_FIRST (__VA_ARGS__), \
                     _NMLOG_PREFIX_NAME, \
                     ((_self && _self != singleton_instance) \
                         ? nm_sprintf_buf (_sbuf, "[%p]", _self) \
                         : ""), \
                     NM_PRINT_FMT_QUOTED (_ifname, "(", _ifname, "): ", "") \
                     _NM_UTILS_MACRO_REST (__VA_ARGS__)); \
        } \
    } G_STMT_END

#define _NMLOG3(level, domain, connection, ...) \
    G_STMT_START { \
        const NMLogLevel _level = (level); \
        const NMLogDomain _domain = (domain); \
        \
        if (nm_logging_enabled (_level, _domain)) { \
            const NMManager *const _self = (self); \
            NMConnection *const _connection = (connection); \
            const char *const _con_id = _nm_connection_get_id (_connection); \
            char _sbuf[32]; \
            \
            _nm_log (_level, _domain, 0, \
                     NULL, _nm_connection_get_uuid (_connection), \
                     "%s%s: %s%s%s" _NM_UTILS_MACRO_FIRST (__VA_ARGS__), \
                     _NMLOG_PREFIX_NAME, \
                     ((_self && _self != singleton_instance) \
                         ? nm_sprintf_buf (_sbuf, "[%p]", _self) \
                         : ""), \
                     NM_PRINT_FMT_QUOTED (_con_id, "(", _con_id, ") ", "") \
                     _NM_UTILS_MACRO_REST (__VA_ARGS__)); \
        } \
    } G_STMT_END

/*****************************************************************************/

static const NMDBusInterfaceInfoExtended interface_info_manager;

static const GDBusSignalInfo signal_info_check_permissions;
static const GDBusSignalInfo signal_info_state_changed;
static const GDBusSignalInfo signal_info_device_added;
static const GDBusSignalInfo signal_info_device_removed;

static gboolean add_device (NMManager *self, NMDevice *device, GError **error);
static void _emit_device_added_removed (NMManager *self,
                                        NMDevice *device,
                                        gboolean is_added);

settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection

NMConnection is an interface, which is implemented by the types NMSimpleConnection (libnm-core), NMSettingsConnection (src) and NMRemoteConnection (libnm).

NMSettingsConnection does a lot of things already:

1) it "is-a" NMDBusObject and exports the API of a connection profile on D-Bus
2) it interacts with NMSettings and contains functionality for tracking the profiles
3) it is the base class of types like NMSKeyfileConnection and NMIfcfgConnection, which handle how the profile is persisted on disk
4) it implements the NMConnection interface, to itself track the settings of the profile

3) and 4) would be better implemented via delegation than inheritance. Address 4) and don't let NMSettingsConnection implement the NMConnection interface. Instead, a settings-connection now references an NMSimpleConnection instance, to which it delegates keeping the actual profile.

Advantages:

- By delegating, there is a clearer separation of what NMSettingsConnection does. For example, in C we often required casts from NMSettingsConnection to NMConnection. NMConnection is a very trivial object with very little logic. When we have an NMConnection instance at hand, it's good to know that it is *only* that simple, instead of also being an entire NMSettingsConnection instance. The main purpose of this patch is to simplify the code by separating the NMConnection from the NMSettingsConnection. We should generally be aware whether we handle an NMSettingsConnection or a trivial NMConnection instance; now, because NMSettingsConnection no longer "is-a" NMConnection, this distinction is apparent.

- NMConnection is implemented as an interface, and we create NMSimpleConnection instances whenever we need a real instance. In GLib, interfaces have a performance overhead that we needlessly pay all the time. With this change, we no longer require NMConnection to be an interface. Thus, in the future we could compile a version of libnm-core for the daemon where NMConnection is not an interface but a GObject implementation akin to NMSimpleConnection.

- In the previous implementation, we could not treat NMConnection as immutable and copy-on-write. For example, when NMDevice needs a snapshot of the activated profile as the applied-connection, all it can do is clone the entire NMSettingsConnection as an NMSimpleConnection. Likewise, when we get an NMConnection instance and want to keep a reference to it, we cannot do that, because we never know who else references and modifies the instance. By separating NMSettingsConnection, we could in the future make NMConnection immutable and copy-on-write, avoiding all unnecessary clones.

(A minimal illustration of the delegation pattern follows the prototype below.)

static NMActiveConnection *_new_active_connection (NMManager *self,
                                                   gboolean is_vpn,
                                                   NMSettingsConnection *sett_conn,
                                                   NMConnection *incompl_conn,
                                                   NMConnection *applied,
                                                   const char *specific_object,
                                                   NMDevice *device,
                                                   NMAuthSubject *subject,
                                                   NMActivationType activation_type,
                                                   NMActivationReason activation_reason,
                                                   GError **error);
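A minimal illustration of the delegation pattern described in the settings-connection note above (plain C, not the actual GObject types; the "Example*" names are made up): the settings object owns a separate connection object and forwards to it, rather than being a connection itself.

/* Sketch only: delegation instead of inheritance. */
typedef struct {
    char *id;
    char *uuid;
} ExampleConnection;                     /* plays the role of NMSimpleConnection */

typedef struct {
    ExampleConnection *connection;       /* delegate holding the actual profile */
    char *filename;                      /* persistence state stays in the wrapper */
} ExampleSettingsConnection;             /* plays the role of NMSettingsConnection */

static const char *
example_settings_connection_get_id (ExampleSettingsConnection *sett_conn)
{
    /* forward to the delegate instead of inheriting an implementation */
    return sett_conn->connection->id;
}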

static void policy_activating_ac_changed (GObject *object, GParamSpec *pspec, gpointer user_data);

static gboolean find_master (NMManager *self,
                             NMConnection *connection,
                             NMDevice *device,
                             NMSettingsConnection **out_master_connection,
                             NMDevice **out_master_device,
                             NMActiveConnection **out_master_ac,
                             GError **error);

static void nm_manager_update_state (NMManager *manager);
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection already does a lot of things:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates keeping the actual profile.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
an NMConnection instance at hand, it's good to know that it is
*only* that simple, instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle an NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as an NMSimpleConnection.
Likewise, when we get an NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who else references and modifies the instance.
By separating NMSettingsConnection we could in the future make
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
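To illustrate the delegation pattern described above, here is a minimal, hypothetical C sketch (not the actual NetworkManager types or functions): the settings object holds a reference to a plain connection object and hands it out explicitly, instead of inheriting from it.

/* Illustrative stand-ins only; the real types are GObjects. */
typedef struct {
	char *uuid;                     /* the actual profile data lives here */
} ExampleConnection;                /* plays the role of NMSimpleConnection */

typedef struct {
	ExampleConnection *conn;        /* delegation: "has-a" instead of "is-a" */
	/* D-Bus export state, on-disk persistence state, ... */
} ExampleSettingsConnection;        /* plays the role of NMSettingsConnection */

/* Callers that only need the profile ask for the delegate explicitly, so it
 * stays obvious whether a piece of code holds the trivial connection or the
 * full settings object. */
static ExampleConnection *
example_settings_connection_get_connection (ExampleSettingsConnection *self)
{
	return self->conn;
}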
|
|
|
static void connection_changed (NMManager *self,
|
|
|
|
|
NMSettingsConnection *sett_conn);
|
2017-12-05 12:35:38 +01:00
|
|
|
static void device_sleep_cb (NMDevice *device,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self);
|
|
|
|
|
|
|
|
|
|
static void settings_startup_complete_changed (NMSettings *settings,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self);
|
|
|
|
|
|
|
|
|
|
static void retry_connections_for_parent_device (NMManager *self, NMDevice *device);
|
2016-09-26 16:01:27 +02:00
|
|
|
|
2012-08-22 17:11:31 -05:00
|
|
|
static void active_connection_state_changed (NMActiveConnection *active,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self);
|
2014-06-06 15:30:24 -04:00
|
|
|
static void active_connection_default_changed (NMActiveConnection *active,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self);
|
2016-03-24 15:20:44 +01:00
|
|
|
static void active_connection_parent_active (NMActiveConnection *active,
|
|
|
|
|
NMActiveConnection *parent_ac,
|
|
|
|
|
NMManager *self);
|
2012-08-22 17:11:31 -05:00
|
|
|
|
2018-04-19 15:27:54 +02:00
|
|
|
static NMActiveConnection *active_connection_find (NMManager *self,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2018-04-19 15:27:54 +02:00
|
|
|
const char *uuid,
|
|
|
|
|
NMActiveConnectionState max_state,
|
|
|
|
|
GPtrArray **out_all_matching);
|
2017-12-05 13:55:25 +01:00
|
|
|
|
2018-04-10 15:55:16 +02:00
|
|
|
static NMConnectivity *concheck_get_mgr (NMManager *self);
|
|
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
static void _internal_activation_auth_done (NMManager *self,
|
|
|
|
|
NMActiveConnection *active,
|
|
|
|
|
gboolean success,
|
|
|
|
|
const char *error_desc);
|
|
|
|
|
static void _add_and_activate_auth_done (NMManager *self,
|
|
|
|
|
NMActiveConnection *active,
|
|
|
|
|
NMConnection *connection,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
2018-10-25 16:54:37 +02:00
|
|
|
NMSettingsConnectionPersistMode persist,
|
2018-04-18 09:06:54 +02:00
|
|
|
gboolean success,
|
|
|
|
|
const char *error_desc);
|
|
|
|
|
static void _activation_auth_done (NMManager *self,
|
|
|
|
|
NMActiveConnection *active,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
gboolean success,
|
|
|
|
|
const char *error_desc);
|
|
|
|
|
|
2017-12-05 12:35:38 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
static NM_CACHED_QUARK_FCN ("autoconnect-root", autoconnect_root_quark)
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2018-04-12 11:19:06 +02:00
|
|
|
static gboolean
|
|
|
|
|
_connection_is_vpn (NMConnection *connection)
|
|
|
|
|
{
|
|
|
|
|
const char *type;
|
|
|
|
|
|
|
|
|
|
type = nm_connection_get_connection_type (connection);
|
|
|
|
|
if (type)
|
|
|
|
|
return nm_streq (type, NM_SETTING_VPN_SETTING_NAME);
|
|
|
|
|
|
|
|
|
|
/* we have an incomplete (invalid) connection at hand. That can only
|
|
|
|
|
* happen during AddAndActivate. Determine whether it's VPN type based
|
2018-09-15 07:20:54 -04:00
|
|
|
* on the existence of a [vpn] section. */
|
2018-04-12 11:19:06 +02:00
|
|
|
return !!nm_connection_get_setting_vpn (connection);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2018-04-10 15:55:16 +02:00
|
|
|
static gboolean
|
|
|
|
|
concheck_enabled (NMManager *self, gboolean *out_changed)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
guint check_enabled;
|
|
|
|
|
|
|
|
|
|
check_enabled = nm_connectivity_check_enabled (concheck_get_mgr (self))
|
|
|
|
|
? 1 : 2;
|
|
|
|
|
if (priv->connectivity_check_enabled_last == check_enabled)
|
|
|
|
|
NM_SET_OUT (out_changed, FALSE);
|
|
|
|
|
else {
|
|
|
|
|
NM_SET_OUT (out_changed, TRUE);
|
|
|
|
|
priv->connectivity_check_enabled_last = check_enabled;
|
|
|
|
|
}
|
|
|
|
|
return check_enabled == 1;
|
|
|
|
|
}
|
|
|
|
|
|
connectivity: schedule connectivity timers per-device and probe for short outages
It might happen that connectivity is lost only for a moment and
returns soon after. Based on that assumption, when we lose connectivity
we want a probe interval during which we check for returning
connectivity more frequently.
For that, we track the timeouts per-device.
The interval starts at 1 second and doubles until
the full interval is reached. Actually, due to the implementation, it's unlikely
that we already perform the second check 1 second later. That is because commonly
the first check returns before the one-second timeout is reached and bumps the
interval to 2 seconds right away.
Also, we go through extra lengths so that manual connectivity checks
delay the periodic checks. By being smarter about that, we can reduce
the number of connectivity checks while still keeping the promise to
check at least within the requested interval.
The complexity of book-keeping the timeouts is remarkable. But I think
it is worth the effort and we should try hard to
- have a connectivity state that is as accurate as possible. Clearly,
connectivity checking means that we are probing, so being more intelligent
about timeout and backoff timers can result in a better connectivity
state. The connectivity state is important because we use it for
the default-route penalty and the GUI indicates bad connectivity.
- be intelligent about avoiding redundant connectivity checks. While
we want to check often to get an accurate connectivity state, we
also want to minimize the number of HTTP requests, in case the
connectivity is established and supposedly stable.
Also, perform connectivity checks in every state of the device.
Even if a device is disconnected, it still might have connectivity,
for example if the user externally adds an IP address on an unmanaged
device.
https://bugzilla.gnome.org/show_bug.cgi?id=792240
2018-02-20 21:41:14 +01:00
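As a rough sketch of the back-off described above (a hypothetical helper, not the actual implementation): after an outage the per-device probe interval starts at 1 second and doubles on each check until the configured full interval is reached again.

/* Hypothetical helper: compute the next probe interval, in seconds. */
static guint
probe_interval_next (guint cur_interval, guint full_interval)
{
	if (cur_interval == 0)
		return 1;                   /* first probe right after losing connectivity */
	if (cur_interval >= full_interval / 2)
		return full_interval;       /* back to the regular check cadence */
	return cur_interval * 2;        /* 1, 2, 4, 8, ... seconds */
}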
|
|
|
static void
|
|
|
|
|
concheck_config_changed_cb (NMConnectivity *connectivity,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *device;
|
2018-04-10 15:55:16 +02:00
|
|
|
gboolean changed;
|
|
|
|
|
|
|
|
|
|
concheck_enabled (self, &changed);
|
|
|
|
|
if (changed)
|
|
|
|
|
_notify (self, PROP_CONNECTIVITY_CHECK_ENABLED);
|
2018-02-20 21:41:14 +01:00
|
|
|
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst)
|
|
|
|
|
nm_device_check_connectivity_update_interval (device);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static NMConnectivity *
|
|
|
|
|
concheck_get_mgr (NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
|
|
|
|
if (G_UNLIKELY (!priv->concheck_mgr)) {
|
|
|
|
|
priv->concheck_mgr = g_object_ref (nm_connectivity_get ());
|
|
|
|
|
g_signal_connect (priv->concheck_mgr,
|
|
|
|
|
NM_CONNECTIVITY_CONFIG_CHANGED,
|
|
|
|
|
G_CALLBACK (concheck_config_changed_cb),
|
|
|
|
|
self);
|
|
|
|
|
}
|
|
|
|
|
return priv->concheck_mgr;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
static AsyncOpData *
|
|
|
|
|
_async_op_data_new_authorize_activate_internal (NMManager *self, NMActiveConnection *active_take)
|
|
|
|
|
{
|
|
|
|
|
AsyncOpData *async_op_data;
|
|
|
|
|
|
|
|
|
|
async_op_data = g_slice_new0 (AsyncOpData);
|
|
|
|
|
async_op_data->async_op_type = ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL;
|
|
|
|
|
async_op_data->self = g_object_ref (self);
|
|
|
|
|
async_op_data->ac_auth.active = active_take;
|
|
|
|
|
c_list_link_tail (&NM_MANAGER_GET_PRIVATE (self)->async_op_lst_head, &async_op_data->async_op_lst);
|
|
|
|
|
return async_op_data;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static AsyncOpData *
|
|
|
|
|
_async_op_data_new_ac_auth_activate_user (NMManager *self,
|
|
|
|
|
NMActiveConnection *active_take,
|
|
|
|
|
GDBusMethodInvocation *invocation_take)
|
|
|
|
|
{
|
|
|
|
|
AsyncOpData *async_op_data;
|
|
|
|
|
|
|
|
|
|
async_op_data = g_slice_new0 (AsyncOpData);
|
|
|
|
|
async_op_data->async_op_type = ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_USER;
|
|
|
|
|
async_op_data->self = g_object_ref (self);
|
|
|
|
|
async_op_data->ac_auth.active = active_take;
|
|
|
|
|
async_op_data->ac_auth.activate_user.invocation = invocation_take;
|
|
|
|
|
c_list_link_tail (&NM_MANAGER_GET_PRIVATE (self)->async_op_lst_head, &async_op_data->async_op_lst);
|
|
|
|
|
return async_op_data;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static AsyncOpData *
|
|
|
|
|
_async_op_data_new_ac_auth_add_and_activate (NMManager *self,
|
|
|
|
|
NMActiveConnection *active_take,
|
|
|
|
|
GDBusMethodInvocation *invocation_take,
|
2018-10-25 16:54:37 +02:00
|
|
|
NMConnection *connection_take,
|
|
|
|
|
NMSettingsConnectionPersistMode persist)
|
2018-04-18 09:06:54 +02:00
|
|
|
{
|
|
|
|
|
AsyncOpData *async_op_data;
|
|
|
|
|
|
|
|
|
|
async_op_data = g_slice_new0 (AsyncOpData);
|
|
|
|
|
async_op_data->async_op_type = ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE;
|
|
|
|
|
async_op_data->self = g_object_ref (self);
|
|
|
|
|
async_op_data->ac_auth.active = active_take;
|
|
|
|
|
async_op_data->ac_auth.add_and_activate.invocation = invocation_take;
|
|
|
|
|
async_op_data->ac_auth.add_and_activate.connection = connection_take;
|
2018-10-25 16:54:37 +02:00
|
|
|
async_op_data->ac_auth.add_and_activate.persist = persist;
|
2018-04-18 09:06:54 +02:00
|
|
|
c_list_link_tail (&NM_MANAGER_GET_PRIVATE (self)->async_op_lst_head, &async_op_data->async_op_lst);
|
|
|
|
|
return async_op_data;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_async_op_complete_ac_auth_cb (NMActiveConnection *active,
|
|
|
|
|
gboolean success,
|
|
|
|
|
const char *error_desc,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
AsyncOpData *async_op_data = user_data;
|
|
|
|
|
|
|
|
|
|
nm_assert (async_op_data);
|
|
|
|
|
nm_assert (NM_IS_MANAGER (async_op_data->self));
|
|
|
|
|
nm_assert (nm_c_list_contains_entry (&NM_MANAGER_GET_PRIVATE (async_op_data->self)->async_op_lst_head, async_op_data, async_op_lst));
|
|
|
|
|
nm_assert (NM_IS_ACTIVE_CONNECTION (active));
|
|
|
|
|
nm_assert (active == async_op_data->ac_auth.active);
|
|
|
|
|
|
|
|
|
|
c_list_unlink (&async_op_data->async_op_lst);
|
|
|
|
|
|
|
|
|
|
switch (async_op_data->async_op_type) {
|
|
|
|
|
case ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL:
|
|
|
|
|
_internal_activation_auth_done (async_op_data->self,
|
|
|
|
|
async_op_data->ac_auth.active,
|
|
|
|
|
success,
|
|
|
|
|
error_desc);
|
|
|
|
|
break;
|
|
|
|
|
case ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_USER:
|
|
|
|
|
_activation_auth_done (async_op_data->self,
|
|
|
|
|
async_op_data->ac_auth.active,
|
|
|
|
|
async_op_data->ac_auth.activate_user.invocation,
|
|
|
|
|
success,
|
|
|
|
|
error_desc);
|
|
|
|
|
break;
|
|
|
|
|
case ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE:
|
|
|
|
|
_add_and_activate_auth_done (async_op_data->self,
|
|
|
|
|
async_op_data->ac_auth.active,
|
|
|
|
|
async_op_data->ac_auth.add_and_activate.connection,
|
|
|
|
|
async_op_data->ac_auth.add_and_activate.invocation,
|
2018-10-25 16:54:37 +02:00
|
|
|
async_op_data->ac_auth.add_and_activate.persist,
|
2018-04-18 09:06:54 +02:00
|
|
|
success,
|
|
|
|
|
error_desc);
|
|
|
|
|
g_object_unref (async_op_data->ac_auth.add_and_activate.connection);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
nm_assert_not_reached ();
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
g_object_unref (async_op_data->ac_auth.active);
|
|
|
|
|
g_object_unref (async_op_data->self);
|
|
|
|
|
g_slice_free (AsyncOpData, async_op_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
device: generate unique default route-metrics per interface
In the past we had NMDefaultRouteManager, which would coordinate adding
the default-route with identical metrics. That especially happened when
activating two devices of the same type without explicitly specifying
ipv4.route-metric. For example, with ethernet devices, the routes on
both interfaces would get a metric of 100.
Coordinating routes was especially necessary because we added
routes with the NLM_F_EXCL flag, akin to `ip route replace`. We not
only had to avoid that activating two devices in NetworkManager would
result in a fight over the default-route, but more importantly
to preserve externally added default-routes on unmanaged interfaces.
NMDefaultRouteManager would ensure that, in case of duplicate
metrics, the device that activated first would keep the
best default-route. It would do so by bumping the metric
of the second device to find an unused metric. The bumping itself
was not very important -- NMDefaultRouteManager could also just not
configure any default-routes that show up as second, and the result
would be quite similar. More important was to keep the best
default-route on the first activating device until the device
deactivates or a device activates that really has a better
default-route.
Likewise, NMRouteManager would globally manage non-default-routes.
It would not do any bumping of metrics, but it would also ensure that the routes
of the device that activates first are not overwritten by a device activating
later.
However, the `ip route replace` approach has downsides, especially
that it messes with routes on other interfaces, interfaces that are
possibly not managed by NetworkManager. Another downside is that
binding a socket to an interface might not result in correct
routes, because the route might just not be there (in case of
NMRouteManager, which wouldn't configure duplicate routes by bumping
their metric).
Since commit 77ec302714795f905301d500b9aab6c88001f32e we no longer
use NLM_F_EXCL, but add routes akin to `ip route append`. When
activating for example two ethernet devices with no explicit route
metric configuration, there are two routes like
default via 10.16.122.254 dev eth0 proto dhcp metric 100
default via 192.168.100.1 dev eth1 proto dhcp metric 100
This does not only affect default routes. In case of a multi-homing
setup you'd get
192.168.100.0/24 dev eth0 proto kernel scope link src 192.168.100.1 metric 100
192.168.100.0/24 dev eth1 proto kernel scope link src 192.168.100.1 metric 100
but it is most visible for default-routes.
Note that we would append the routes that are activated later, as the order
of `ip route show` confirms. One might hence expect that the kernel selects
a route based on the order in the routing tables. However, that isn't
the case, and activating the second interface will non-deterministically
re-route traffic via the new interface. That will interfere badly
with NAT, stateful firewalls, and existing connections (like TCP).
The solution is to have NMManager keep a global index of the default route-metrics
currently in use. So, instead of determining the default-route metric based solely
on the device-type, we now in addition generate default metrics that do not
overlap. For example, if you activate eth0 first, it gets route-metric 100,
and if you then activate eth1, it gets 101. Note that if you deactivate
and re-activate eth0, it will then get route-metric 102, because the
best route should stick on eth1 (which reserves the range 100 to 101).
Note that when a connection explicitly selects a particular metric, that
choice is honored (contrary to NMDefaultRouteManager, which was more concerned
with avoiding conflicts than keeping the exact metric).
https://bugzilla.redhat.com/show_bug.cgi?id=1505893
2017-12-05 16:32:04 +01:00
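The allocation scheme described above can be sketched roughly as follows (a hypothetical helper, not the actual code): starting from the device-type default, pick the smallest metric that no other activated device has reserved yet.

/* Hypothetical sketch: find the smallest metric >= aspired that is not
 * already reserved by another activated device. */
static guint32
example_pick_effective_metric (const guint32 *reserved, guint n_reserved, guint32 aspired)
{
	guint32 metric = aspired;
	guint i = 0;

	while (i < n_reserved) {
		if (reserved[i] == metric) {
			metric++;               /* taken: bump and rescan the reserved list */
			i = 0;
			continue;
		}
		i++;
	}
	return metric;
}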
|
|
|
typedef struct {
|
|
|
|
|
int ifindex;
|
|
|
|
|
guint32 aspired_metric;
|
|
|
|
|
guint32 effective_metric;
|
|
|
|
|
} DeviceRouteMetricData;
|
|
|
|
|
|
|
|
|
|
static DeviceRouteMetricData *
|
2017-12-20 12:45:02 +01:00
|
|
|
_device_route_metric_data_new (int ifindex, guint32 aspired_metric, guint32 effective_metric)
|
2017-12-05 16:32:04 +01:00
|
|
|
{
|
|
|
|
|
DeviceRouteMetricData *data;
|
|
|
|
|
|
|
|
|
|
nm_assert (ifindex > 0);
|
|
|
|
|
|
|
|
|
|
/* For IPv4, metrics can use the entire uint32 bit range. For IPv6,
|
|
|
|
|
* zero is treated like 1024. Since we handle IPv4 and IPv6 identically,
|
|
|
|
|
* we cannot allow a zero metric here.
|
|
|
|
|
*/
|
2017-12-20 12:45:02 +01:00
|
|
|
nm_assert (aspired_metric > 0);
|
|
|
|
|
nm_assert (effective_metric == 0 || aspired_metric <= effective_metric);
|
2017-12-05 16:32:04 +01:00
|
|
|
|
|
|
|
|
data = g_slice_new0 (DeviceRouteMetricData);
|
|
|
|
|
data->ifindex = ifindex;
|
2017-12-20 12:45:02 +01:00
|
|
|
data->aspired_metric = aspired_metric;
|
|
|
|
|
data->effective_metric = effective_metric ?: aspired_metric;
|
2017-12-05 16:32:04 +01:00
|
|
|
return data;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint
|
|
|
|
|
_device_route_metric_data_by_ifindex_hash (gconstpointer p)
|
|
|
|
|
{
|
|
|
|
|
const DeviceRouteMetricData *data = p;
|
|
|
|
|
NMHashState h;
|
|
|
|
|
|
|
|
|
|
nm_hash_init (&h, 1030338191);
|
|
|
|
|
nm_hash_update_vals (&h, data->ifindex);
|
|
|
|
|
return nm_hash_complete (&h);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_device_route_metric_data_by_ifindex_equal (gconstpointer pa, gconstpointer pb)
|
|
|
|
|
{
|
|
|
|
|
const DeviceRouteMetricData *a = pa;
|
|
|
|
|
const DeviceRouteMetricData *b = pb;
|
|
|
|
|
|
|
|
|
|
return a->ifindex == b->ifindex;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint32
|
|
|
|
|
_device_route_metric_get (NMManager *self,
|
|
|
|
|
int ifindex,
|
|
|
|
|
NMDeviceType device_type,
|
2017-12-20 12:45:02 +01:00
|
|
|
gboolean lookup_only,
|
|
|
|
|
guint32 *out_aspired_metric)
|
2017-12-05 16:32:04 +01:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv;
|
|
|
|
|
const DeviceRouteMetricData *d2;
|
|
|
|
|
DeviceRouteMetricData *data;
|
|
|
|
|
DeviceRouteMetricData data_lookup;
|
|
|
|
|
const NMDedupMultiHeadEntry *all_links_head;
|
|
|
|
|
NMPObject links_needle;
|
|
|
|
|
guint n_links;
|
|
|
|
|
gboolean cleaned = FALSE;
|
|
|
|
|
GHashTableIter h_iter;
|
2017-12-20 12:45:02 +01:00
|
|
|
guint32 metric;
|
2017-12-05 16:32:04 +01:00
|
|
|
|
|
|
|
|
g_return_val_if_fail (NM_IS_MANAGER (self), 0);
|
|
|
|
|
|
2017-12-20 12:45:02 +01:00
|
|
|
NM_SET_OUT (out_aspired_metric, 0);
|
|
|
|
|
|
2017-12-05 16:32:04 +01:00
|
|
|
if (ifindex <= 0) {
|
|
|
|
|
if (lookup_only)
|
|
|
|
|
return 0;
|
2017-12-20 12:45:02 +01:00
|
|
|
metric = nm_device_get_route_metric_default (device_type);
|
|
|
|
|
NM_SET_OUT (out_aspired_metric, metric);
|
|
|
|
|
return metric;
|
2017-12-05 16:32:04 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
|
|
|
|
if ( lookup_only
|
|
|
|
|
&& !priv->device_route_metrics)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (G_UNLIKELY (!priv->device_route_metrics)) {
|
|
|
|
|
const GHashTable *h;
|
|
|
|
|
const NMConfigDeviceStateData *device_state;
|
|
|
|
|
|
|
|
|
|
priv->device_route_metrics = g_hash_table_new_full (_device_route_metric_data_by_ifindex_hash,
|
|
|
|
|
_device_route_metric_data_by_ifindex_equal,
|
|
|
|
|
NULL,
|
|
|
|
|
nm_g_slice_free_fcn (DeviceRouteMetricData));
|
|
|
|
|
cleaned = TRUE;
|
|
|
|
|
|
|
|
|
|
/* we need to pre-populate the cache for all (still existing) devices from the state-file */
|
|
|
|
|
h = nm_config_device_state_get_all (priv->config);
|
|
|
|
|
if (!h)
|
|
|
|
|
goto initited;
|
|
|
|
|
|
|
|
|
|
g_hash_table_iter_init (&h_iter, (GHashTable *) h);
|
|
|
|
|
while (g_hash_table_iter_next (&h_iter, NULL, (gpointer *) &device_state)) {
|
2017-12-20 12:45:02 +01:00
|
|
|
if (!device_state->route_metric_default_effective)
|
2017-12-05 16:32:04 +01:00
|
|
|
				continue;
			if (!nm_platform_link_get (priv->platform, device_state->ifindex)) {
				/* we have the entry in the state file, but (currently) no such
				 * ifindex exists in platform. Most likely the entry is obsolete,
				 * hence we skip it. */
				continue;
			}
			if (!g_hash_table_add (priv->device_route_metrics,
			                       _device_route_metric_data_new (device_state->ifindex,
			                                                      device_state->route_metric_default_aspired,
			                                                      device_state->route_metric_default_effective)))
				nm_assert_not_reached ();
		}
	}

initited:
	data_lookup.ifindex = ifindex;

	data = g_hash_table_lookup (priv->device_route_metrics, &data_lookup);
	if (data)
		goto out;
	if (lookup_only)
		return 0;

	if (!cleaned) {
		/* get the number of all links in the platform cache. */
		all_links_head = nm_platform_lookup_all (priv->platform,
		                                         NMP_CACHE_ID_TYPE_OBJECT_TYPE,
		                                         nmp_object_stackinit_id_link (&links_needle, 1));
		n_links = all_links_head ? all_links_head->len : 0;

		/* on systems where a lot of devices are created and go away, the index contains
		 * a lot of stale entries. We must from time to time clean them up.
		 *
		 * Do this cleanup whenever we have more entries than 2 times the number of links. */
		if (G_UNLIKELY (g_hash_table_size (priv->device_route_metrics) > NM_MAX (20, n_links * 2))) {
			/* from time to time, we need to do some house-keeping and prune stale entries.
			 * Otherwise, on a system where interfaces frequently come and go (docker), we
			 * keep growing this cache for ifindexes that no longer exist. */
			g_hash_table_iter_init (&h_iter, priv->device_route_metrics);
			while (g_hash_table_iter_next (&h_iter, NULL, (gpointer *) &d2)) {
				if (!nm_platform_link_get (priv->platform, d2->ifindex))
					g_hash_table_iter_remove (&h_iter);
			}
			cleaned = TRUE;
		}
	}

	data = _device_route_metric_data_new (ifindex, nm_device_get_route_metric_default (device_type), 0);
	/* unfortunately, there is no straightforward way to lookup all reserved metrics.
	 * Note that we don't only have to know which metrics are currently reserved,
	 * but also which metrics are now seemingly un-used but caused another reserved
	 * metric to be bumped. Hence, the naive O(n^2) search :(
	 *
	 * Well, technically, since we limit bumping the metric to 50, this entire
	 * loop runs at most 50 times, so it's still O(n). Let's just say, it's not
	 * very efficient. */
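	/* Illustrative example (assuming the usual device-type defaults): if eth0 already
	 * reserved metric 100 and eth1 now asks for 100 as well, the loop below detects the
	 * overlap and bumps eth1 to an effective metric of 101 (eth1 then reserves the range
	 * 100..101). If eth0 later deactivates and re-activates, it gets 102, because the
	 * range 100..101 is still held by eth1. */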
again:
	g_hash_table_iter_init (&h_iter, priv->device_route_metrics);
	while (g_hash_table_iter_next (&h_iter, NULL, (gpointer *) &d2)) {
		if (   data->effective_metric < d2->aspired_metric
		    || data->effective_metric > d2->effective_metric) {
			/* no overlap. Skip. */
			continue;
		}
		if (   !cleaned
		    && !nm_platform_link_get (priv->platform, d2->ifindex)) {
			/* the metric seems taken, but there is no such interface. This entry
			 * is stale, forget about it. */
			g_hash_table_iter_remove (&h_iter);
			continue;
		}

		if (d2->effective_metric == G_MAXUINT32) {
			/* we cannot bump the metric any further. Done.
			 *
			 * Actually, this can currently not happen because the aspired_metric
			 * values are small numbers and we limit the bumping to 50. Still, for
			 * completeness... */
			data->effective_metric = G_MAXUINT32;
			break;
		}

		if (d2->effective_metric - data->aspired_metric >= 50) {
			/* as one active interface reserves an entire range of metrics
			 * (from aspired_metric to effective_metric), that means if you
			 * alternatingly activate two interfaces, their metric will
			 * bump each other.
			 *
			 * Limit this: bump the metric by at most 50 points. */
			data->effective_metric = data->aspired_metric + 50;
			break;
		}

		/* bump the metric, and search again. */
		data->effective_metric = d2->effective_metric + 1;
		goto again;
	}

	_LOGT (LOGD_DEVICE, "default-route-metric: ifindex %d reserves metric %u (aspired %u)",
	       data->ifindex, data->effective_metric, data->aspired_metric);

	if (!g_hash_table_add (priv->device_route_metrics, data))
		nm_assert_not_reached ();

out:
	NM_SET_OUT (out_aspired_metric, data->aspired_metric);
	return data->effective_metric;
}

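/* nm_manager_device_route_metric_reserve:
 * @self: the #NMManager
 * @ifindex: the interface index for which to reserve a metric
 * @device_type: the device type, which determines the aspired (default) metric
 *
 * Descriptive summary (added comment): reserves a default-route metric for
 * @ifindex that does not overlap with the ranges already reserved by other
 * interfaces. If the device-type default is already taken, the effective
 * metric is bumped (by at most 50) until a free value is found. The
 * reservation is kept until nm_manager_device_route_metric_clear() is called
 * for the same ifindex.
 *
 * Returns: the effective route metric to use (never 0).
 */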
guint32
nm_manager_device_route_metric_reserve (NMManager *self,
                                        int ifindex,
                                        NMDeviceType device_type)
{
	guint32 metric;

	metric = _device_route_metric_get (self, ifindex, device_type, FALSE, NULL);
	nm_assert (metric != 0);
	return metric;
}

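/* nm_manager_device_route_metric_clear:
 * @self: the #NMManager
 * @ifindex: the interface index whose reservation should be released
 *
 * Descriptive summary (added comment): releases the route-metric reservation
 * for @ifindex, if any, so that the metric range can be handed out again.
 * It is safe to call this for an ifindex that never reserved a metric.
 */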
void
nm_manager_device_route_metric_clear (NMManager *self,
                                      int ifindex)
{
	NMManagerPrivate *priv;
	DeviceRouteMetricData data_lookup;

	priv = NM_MANAGER_GET_PRIVATE (self);

	if (!priv->device_route_metrics)
		return;
	data_lookup.ifindex = ifindex;
	if (g_hash_table_remove (priv->device_route_metrics, &data_lookup)) {
		_LOGT (LOGD_DEVICE, "default-route-metric: ifindex %d released",
		       ifindex);
	}
}
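
/* A minimal usage sketch (hypothetical caller; @manager, @ifindex and
 * @device_type are assumed to be known to the device code):
 *
 *   guint32 metric;
 *
 *   metric = nm_manager_device_route_metric_reserve (manager, ifindex, device_type);
 *   // ... configure the default route with this metric ...
 *   nm_manager_device_route_metric_clear (manager, ifindex);  // on deactivation
 */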

/*****************************************************************************/

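/* _delete_volatile_connection_do:
 *
 * Descriptive summary (added comment): deletes a settings-connection that is
 * flagged as volatile, but only once it is no longer referenced by any active
 * connection and is still known to the settings instance. */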
static void
_delete_volatile_connection_do (NMManager *self,
                                NMSettingsConnection *connection)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);

	if (!NM_FLAGS_HAS (nm_settings_connection_get_flags (connection),
	                   NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE))
		return;
	if (active_connection_find (self,
	                            connection,
	                            NULL,
	                            NM_ACTIVE_CONNECTION_STATE_DEACTIVATED,
	                            NULL))
		return;
	if (!nm_settings_has_connection (priv->settings, connection))
		return;

	_LOGD (LOGD_DEVICE, "volatile connection disconnected. Deleting connection '%s' (%s)",
	       nm_settings_connection_get_id (connection), nm_settings_connection_get_uuid (connection));
	nm_settings_connection_delete (connection, NULL);
}

/* Returns: whether to notify D-Bus of the removal or not */
static gboolean
active_connection_remove (NMManager *self, NMActiveConnection *active)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	gs_unref_object NMSettingsConnection *connection = NULL;
	gboolean notify;

	nm_assert (NM_IS_ACTIVE_CONNECTION (active));
	nm_assert (c_list_contains (&priv->active_connections_lst_head, &active->active_connections_lst));

    notify = nm_dbus_object_is_exported (NM_DBUS_OBJECT (active));

    c_list_unlink (&active->active_connections_lst);
    g_signal_emit (self, signals[ACTIVE_CONNECTION_REMOVED], 0, active);
    g_signal_handlers_disconnect_by_func (active, active_connection_state_changed, self);
    g_signal_handlers_disconnect_by_func (active, active_connection_default_changed, self);
    g_signal_handlers_disconnect_by_func (active, active_connection_parent_active, self);

    connection = nm_g_object_ref (nm_active_connection_get_settings_connection (active));

    nm_dbus_object_clear_and_unexport (&active);

    if (connection)
        _delete_volatile_connection_do (self, connection);

    return notify;
}

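/* Idle handler scheduled from active_connection_state_changed(): sweeps the
 * active-connections list and removes every entry that has reached the
 * DEACTIVATED state, grouping the resulting property notifications with
 * g_object_freeze_notify()/g_object_thaw_notify(). */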
static gboolean
_active_connection_cleanup (gpointer user_data)
{
    NMManager *self = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMActiveConnection *ac, *ac_safe;

    priv->ac_cleanup_id = 0;

    g_object_freeze_notify (G_OBJECT (self));
    c_list_for_each_entry_safe (ac, ac_safe, &priv->active_connections_lst_head, active_connections_lst) {
        if (nm_active_connection_get_state (ac) == NM_ACTIVE_CONNECTION_STATE_DEACTIVATED) {
            if (active_connection_remove (self, ac))
                _notify (self, PROP_ACTIVE_CONNECTIONS);
        }
    }
    g_object_thaw_notify (G_OBJECT (self));

    return FALSE;
}

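/* "notify::state" handler for tracked active connections (connected in
 * active_connection_add() below). Once a connection reaches DEACTIVATED it
 * schedules the idle cleanup above, clears the autoconnect-root marker on the
 * settings connection, and refreshes the overall manager state. */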
static void
active_connection_state_changed (NMActiveConnection *active,
                                 GParamSpec *pspec,
                                 NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMActiveConnectionState state;
    NMSettingsConnection *con;

    state = nm_active_connection_get_state (active);
    if (state == NM_ACTIVE_CONNECTION_STATE_DEACTIVATED) {
        /* Destroy active connections from an idle handler to ensure that
         * their last property change notifications go out, which wouldn't
         * happen if we destroyed them immediately when their state was set
         * to DEACTIVATED.
         */
        if (!priv->ac_cleanup_id)
            priv->ac_cleanup_id = g_idle_add (_active_connection_cleanup, self);

        con = nm_active_connection_get_settings_connection (active);
        if (con)
            g_object_set_qdata (G_OBJECT (con), autoconnect_root_quark (), NULL);
    }

    nm_manager_update_state (self);
}

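/* Handler for the NM_ACTIVE_CONNECTION_DEFAULT and NM_ACTIVE_CONNECTION_DEFAULT6
 * property notifications; it only refreshes the overall manager state. */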
static void
active_connection_default_changed (NMActiveConnection *active,
                                   GParamSpec *pspec,
                                   NMManager *self)
{
    nm_manager_update_state (self);
}

/**
 * active_connection_add():
 * @self: the #NMManager
 * @active: the #NMActiveConnection to manage
 *
 * Begins to track and manage @active. Increases the refcount of @active.
 */
static void
active_connection_add (NMManager *self,
                       NMActiveConnection *active)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);

    nm_assert (NM_IS_ACTIVE_CONNECTION (active));
    nm_assert (!c_list_is_linked (&active->active_connections_lst));

    c_list_link_front (&priv->active_connections_lst_head, &active->active_connections_lst);
    g_object_ref (active);

    g_signal_connect (active,
                      "notify::" NM_ACTIVE_CONNECTION_STATE,
                      G_CALLBACK (active_connection_state_changed),
                      self);
    g_signal_connect (active,
                      "notify::" NM_ACTIVE_CONNECTION_DEFAULT,
                      G_CALLBACK (active_connection_default_changed),
                      self);
    g_signal_connect (active,
                      "notify::" NM_ACTIVE_CONNECTION_DEFAULT6,
                      G_CALLBACK (active_connection_default_changed),
                      self);

    if (!nm_dbus_object_is_exported (NM_DBUS_OBJECT (active)))
        nm_dbus_object_export (NM_DBUS_OBJECT (active));

    g_signal_emit (self, signals[ACTIVE_CONNECTION_ADDED], 0, active);

    _notify (self, PROP_ACTIVE_CONNECTIONS);
}

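/* Returns the manager-owned list head of all tracked active connections.
 * Callers must not modify the list. */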
const CList *
nm_manager_get_active_connections (NMManager *manager)
{
    return &NM_MANAGER_GET_PRIVATE (manager)->active_connections_lst_head;
}

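/* Illustrative sketch (hypothetical, not part of the original file): the
 * returned CList can be walked with c_list_for_each_entry(), just as the
 * manager itself does. The cast only drops the const qualifier for iteration;
 * the list is not modified. */
#if 0
static guint
example_count_active_connections (NMManager *manager)
{
    CList *head = (CList *) nm_manager_get_active_connections (manager);
    NMActiveConnection *ac;
    guint n = 0;

    c_list_for_each_entry (ac, head, active_connections_lst)
        n++;
    return n;
}
#endif
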
static NMActiveConnection *
active_connection_find (NMManager *self,
                        NMSettingsConnection *sett_conn,
                        const char *uuid,
                        NMActiveConnectionState max_state /* candidates in state @max_state will be found */,
                        GPtrArray **out_all_matching)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMActiveConnection *ac;
    NMActiveConnection *best_ac = NULL;
    GPtrArray *all = NULL;

    nm_assert (!sett_conn || NM_IS_SETTINGS_CONNECTION (sett_conn));
    nm_assert (!out_all_matching || !*out_all_matching);

    c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
        NMSettingsConnection *ac_conn;

        ac_conn = nm_active_connection_get_settings_connection (ac);
        if (   sett_conn
            && sett_conn != ac_conn)
            continue;
        if (   uuid
            && !nm_streq0 (uuid, nm_settings_connection_get_uuid (ac_conn)))
            continue;
        if (nm_active_connection_get_state (ac) > max_state)
            continue;

        if (!out_all_matching)
            return ac;

        if (!best_ac) {
            best_ac = ac;
            continue;
        }

        if (!all) {
            all = g_ptr_array_new_with_free_func (g_object_unref);
            g_ptr_array_add (all, g_object_ref (best_ac));
        }
        g_ptr_array_add (all, g_object_ref (ac));
    }

    if (!best_ac)
        return NULL;

    /* As an optimization, we only allocate @out_all_matching if there is more
     * than one result. If there is only one result, we only return the single
     * element and don't bother allocating an array. That's the common case.
     *
     * Also, in case we have multiple results, we return the *first* one
     * as @best_ac. */
    nm_assert (   !all
               || (   all->len >= 2
                   && all->pdata[0] == best_ac));

    *out_all_matching = all;
    return best_ac;
}

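/* Illustrative sketch (hypothetical caller, not original code): look up the
 * best matching active connection for a profile and, when there are several
 * candidates, also fetch them all via @out_all_matching. */
#if 0
static void
example_find_matching (NMManager *self, NMSettingsConnection *sett_conn)
{
    GPtrArray *all = NULL;
    NMActiveConnection *best;

    best = active_connection_find (self,
                                   sett_conn,
                                   NULL,
                                   NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
                                   &all);
    if (!best)
        return;

    /* @all is only non-NULL when more than one active connection matched;
     * in that case all->pdata[0] == best and the array holds one reference
     * per element. */
    if (all)
        g_ptr_array_unref (all);
}
#endif
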
static NMActiveConnection *
active_connection_find_by_connection (NMManager *self,
                                      NMSettingsConnection *sett_conn,
                                      NMConnection *connection,
                                      NMActiveConnectionState max_state,
                                      GPtrArray **out_all_matching)
{
    nm_assert (NM_IS_MANAGER (self));
    nm_assert (!sett_conn || NM_IS_SETTINGS_CONNECTION (sett_conn));
    nm_assert (!connection || NM_IS_CONNECTION (connection));
    nm_assert (sett_conn || connection);
    nm_assert (!connection || !sett_conn || connection == nm_settings_connection_get_connection (sett_conn));

    /* Depending on whether @connection is a settings connection, either look
     * up by object-identity of @connection, or compare the UUID. */
    return active_connection_find (self,
                                   sett_conn,
                                   sett_conn ? NULL : nm_connection_get_uuid (connection),
                                   max_state,
                                   out_all_matching);
}

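/* Filter used with nm_settings_get_connections_clone() below: a profile is
 * considered activatable when it is not volatile and either allows multiple
 * concurrent activations or has no active connection that is activating or
 * activated yet. */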
typedef struct {
    NMManager *self;
    gboolean for_auto_activation;
} GetActivatableConnectionsFilterData;

static gboolean
_get_activatable_connections_filter (NMSettings *settings,
                                     NMSettingsConnection *sett_conn,
                                     gpointer user_data)
{
    const GetActivatableConnectionsFilterData *d = user_data;
    NMConnectionMultiConnect multi_connect;

    if (NM_FLAGS_HAS (nm_settings_connection_get_flags (sett_conn),
                      NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE))
        return FALSE;

    multi_connect = _nm_connection_get_multi_connect (nm_settings_connection_get_connection (sett_conn));
    if (   multi_connect == NM_CONNECTION_MULTI_CONNECT_MULTIPLE
        || (   multi_connect == NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE
            && !d->for_auto_activation))
        return TRUE;

    /* The connection is activatable if it has no active connection that is in
     * state activated, activating, or waiting to be activated. */
    return !active_connection_find (d->self,
|
|
|
sett_conn,
|
2018-06-27 14:20:57 +02:00
|
|
|
NULL,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
|
|
|
|
|
NULL);
|
2017-02-03 15:13:03 +01:00
|
|
|
}
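A minimal sketch of the delegation that the commit message above describes: the settings object owns a plain profile object and hands it out through an accessor, instead of being an NMConnection itself. The type and helper names below are hypothetical; only nm_settings_connection_get_connection(), used in the filter above, is the accessor actually present in this file.

/* hypothetical illustration of "delegation instead of inheritance" */
typedef struct {
    GObject parent;            /* "is-a" D-Bus object, not "is-a" connection */
    NMConnection *connection;  /* delegate: an NMSimpleConnection instance */
} MySettingsConnection;

static NMConnection *
my_settings_connection_get_connection (MySettingsConnection *self)
{
    /* callers only ever see the small, trivial NMConnection */
    return self->connection;
}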
|
|
|
|
|
|
|
|
|
|
NMSettingsConnection **
|
2018-06-27 14:20:57 +02:00
|
|
|
nm_manager_get_activatable_connections (NMManager *manager,
|
|
|
|
|
gboolean for_auto_activation,
|
|
|
|
|
gboolean sort,
|
|
|
|
|
guint *out_len)
|
2013-09-12 10:28:21 -04:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (manager);
|
2018-06-27 14:20:57 +02:00
|
|
|
const GetActivatableConnectionsFilterData d = {
|
|
|
|
|
.self = manager,
|
|
|
|
|
.for_auto_activation = for_auto_activation,
|
|
|
|
|
};
|
2013-09-12 10:28:21 -04:00
|
|
|
|
2017-11-22 12:46:58 +01:00
|
|
|
return nm_settings_get_connections_clone (priv->settings, out_len,
|
|
|
|
|
_get_activatable_connections_filter,
|
2018-06-27 14:20:57 +02:00
|
|
|
(gpointer) &d,
|
2017-11-22 12:46:58 +01:00
|
|
|
sort ? nm_settings_connection_cmp_autoconnect_priority_p_with_data : NULL,
|
|
|
|
|
NULL);
|
2013-09-12 10:28:21 -04:00
|
|
|
}
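A hedged usage sketch for nm_manager_get_activatable_connections() above. It assumes that, as with other *_clone() helpers, only the returned container array is caller-owned (released with g_free()/gs_free) while the elements stay owned by NMSettings; "manager" and the loop body are placeholders.

{
    gs_free NMSettingsConnection **conns = NULL;
    guint len, i;

    conns = nm_manager_get_activatable_connections (manager,
                                                    FALSE /* for_auto_activation */,
                                                    TRUE  /* sort by autoconnect priority */,
                                                    &len);
    for (i = 0; i < len; i++) {
        /* inspect conns[i]; the array itself is released by gs_free */
    }
}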
|
|
|
|
|
|
2012-09-14 15:21:29 -05:00
|
|
|
static NMActiveConnection *
|
2018-03-23 21:49:41 +01:00
|
|
|
active_connection_get_by_path (NMManager *self, const char *path)
|
2012-09-14 15:21:29 -05:00
|
|
|
{
|
2018-03-23 21:49:41 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2017-11-23 21:30:09 +01:00
|
|
|
NMActiveConnection *ac;
|
2012-09-14 15:21:29 -05:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
ac = nm_dbus_manager_lookup_object (nm_dbus_object_get_manager (NM_DBUS_OBJECT (self)),
|
|
|
|
|
path);
|
2018-03-23 21:49:41 +01:00
|
|
|
if ( !ac
|
|
|
|
|
|| !NM_IS_ACTIVE_CONNECTION (ac)
|
|
|
|
|
|| c_list_is_empty (&ac->active_connections_lst))
|
|
|
|
|
return NULL;
|
2012-09-14 15:21:29 -05:00
|
|
|
|
2018-03-23 21:49:41 +01:00
|
|
|
nm_assert (c_list_contains (&priv->active_connections_lst_head, &ac->active_connections_lst));
|
|
|
|
|
return ac;
|
2012-09-14 15:21:29 -05:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2012-08-22 17:11:31 -05:00
|
|
|
|
2015-01-30 19:52:53 +01:00
|
|
|
static void
|
2015-01-21 12:58:32 +01:00
|
|
|
_config_changed_cb (NMConfig *config, NMConfigData *config_data, NMConfigChangeFlags changes, NMConfigData *old_data, NMManager *self)
|
2015-01-30 19:52:53 +01:00
|
|
|
{
|
2018-04-10 15:55:16 +02:00
|
|
|
g_object_freeze_notify (G_OBJECT (self));
|
|
|
|
|
|
2015-07-03 11:06:39 +02:00
|
|
|
if (NM_FLAGS_HAS (changes, NM_CONFIG_CHANGE_GLOBAL_DNS_CONFIG))
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify (self, PROP_GLOBAL_DNS_CONFIGURATION);
|
2018-04-10 15:55:16 +02:00
|
|
|
if ((!nm_config_data_get_connectivity_uri (config_data)) != (!nm_config_data_get_connectivity_uri (old_data)))
|
|
|
|
|
_notify (self, PROP_CONNECTIVITY_CHECK_AVAILABLE);
|
|
|
|
|
|
|
|
|
|
g_object_thaw_notify (G_OBJECT (self));
|
2015-01-30 19:52:53 +01:00
|
|
|
}
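The freeze/thaw pair above is what allows several property changes to be grouped, as noted for PropertiesChanged emissions in the D-Bus rework commit message later in this file. A generic, hedged sketch of the pattern (object and property names are placeholders):

g_object_freeze_notify (G_OBJECT (obj));

/* change several properties; "notify" emissions are queued, not sent */
g_object_set (obj,
              "prop-a", 1,
              "prop-b", 2,
              NULL);

/* queued notifications fire here, at most once per property */
g_object_thaw_notify (G_OBJECT (obj));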
|
|
|
|
|
|
2016-05-30 15:42:44 +02:00
|
|
|
static void
|
|
|
|
|
_reload_auth_cb (NMAuthChain *chain,
|
|
|
|
|
GError *error,
|
|
|
|
|
GDBusMethodInvocation *context,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
GError *ret_error = NULL;
|
|
|
|
|
NMAuthCallResult result;
|
|
|
|
|
guint32 flags;
|
|
|
|
|
NMAuthSubject *subject;
|
|
|
|
|
char s_buf[60];
|
2016-05-30 16:43:39 +02:00
|
|
|
NMConfigChangeFlags reload_type = NM_CONFIG_CHANGE_NONE;
|
2016-05-30 15:42:44 +02:00
|
|
|
|
|
|
|
|
g_assert (context);
|
|
|
|
|
|
|
|
|
|
priv->auth_chains = g_slist_remove (priv->auth_chains, chain);
|
|
|
|
|
flags = GPOINTER_TO_UINT (nm_auth_chain_get_data (chain, "flags"));
|
|
|
|
|
|
|
|
|
|
subject = nm_auth_chain_get_subject (chain);
|
|
|
|
|
|
|
|
|
|
result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_RELOAD);
|
|
|
|
|
if (error) {
|
|
|
|
|
_LOGD (LOGD_CORE, "Reload request failed: %s", error->message);
|
|
|
|
|
ret_error = g_error_new (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Reload request failed: %s",
|
|
|
|
|
error->message);
|
|
|
|
|
} else if (result != NM_AUTH_CALL_RESULT_YES) {
|
|
|
|
|
ret_error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Not authorized to reload configuration");
|
2016-05-30 16:43:39 +02:00
|
|
|
} else {
|
|
|
|
|
if (NM_FLAGS_ANY (flags, ~NM_MANAGER_RELOAD_FLAGS_ALL)) {
|
|
|
|
|
/* invalid flags */
|
|
|
|
|
} else if (flags == 0)
|
|
|
|
|
reload_type = NM_CONFIG_CHANGE_CAUSE_SIGHUP;
|
|
|
|
|
else {
|
|
|
|
|
if (NM_FLAGS_HAS (flags, NM_MANAGER_RELOAD_FLAGS_CONF))
|
|
|
|
|
reload_type |= NM_CONFIG_CHANGE_CAUSE_CONF;
|
|
|
|
|
if (NM_FLAGS_HAS (flags, NM_MANAGER_RELOAD_FLAGS_DNS_RC))
|
|
|
|
|
reload_type |= NM_CONFIG_CHANGE_CAUSE_DNS_RC;
|
|
|
|
|
if (NM_FLAGS_HAS (flags, NM_MANAGER_RELOAD_FLAGS_DNS_FULL))
|
|
|
|
|
reload_type |= NM_CONFIG_CHANGE_CAUSE_DNS_FULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (reload_type == NM_CONFIG_CHANGE_NONE) {
|
|
|
|
|
ret_error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_INVALID_ARGUMENTS,
|
|
|
|
|
"Invalid flags for reload");
|
|
|
|
|
}
|
2016-05-30 15:42:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nm_audit_log_control_op (NM_AUDIT_OP_RELOAD,
|
|
|
|
|
nm_sprintf_buf (s_buf, "%u", flags),
|
|
|
|
|
ret_error == NULL, subject,
|
|
|
|
|
ret_error ? ret_error->message : NULL);
|
|
|
|
|
|
|
|
|
|
if (ret_error) {
|
|
|
|
|
g_dbus_method_invocation_take_error (context, ret_error);
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-09 18:06:32 +01:00
|
|
|
nm_config_reload (priv->config, reload_type, TRUE);
|
2016-05-30 15:42:44 +02:00
|
|
|
g_dbus_method_invocation_return_value (context, NULL);
|
|
|
|
|
|
|
|
|
|
out:
|
2018-04-09 11:52:55 +02:00
|
|
|
nm_auth_chain_destroy (chain);
|
2016-05-30 15:42:44 +02:00
|
|
|
}
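Condensed, the authorization round trip completed above (and started in impl_manager_reload() below) looks as follows. This is only a summary assembled from the NMAuthChain calls already visible in this file, not additional API:

/* request side (impl_manager_reload): */
chain = nm_auth_chain_new_context (invocation, _reload_auth_cb, self);
nm_auth_chain_set_data (chain, "flags", GUINT_TO_POINTER (flags), NULL);
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_RELOAD, TRUE);

/* completion side (_reload_auth_cb): */
flags  = GPOINTER_TO_UINT (nm_auth_chain_get_data (chain, "flags"));
result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_RELOAD);
/* ... reply on the invocation, then ... */
nm_auth_chain_destroy (chain);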
|
|
|
|
|
|
|
|
|
|
static void
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
impl_manager_reload (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2016-05-30 15:42:44 +02:00
|
|
|
NMAuthChain *chain;
|
2018-02-26 13:51:52 +01:00
|
|
|
guint32 flags;
|
2016-05-30 15:42:44 +02:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get (parameters, "(u)", &flags);
|
2016-05-30 15:42:44 +02:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context (invocation, _reload_auth_cb, self);
|
2016-05-30 15:42:44 +02:00
|
|
|
if (!chain) {
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal (invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Unable to authenticate request");
|
2016-05-30 15:42:44 +02:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
|
|
|
|
nm_auth_chain_set_data (chain, "flags", GUINT_TO_POINTER (flags), NULL);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_RELOAD, TRUE);
|
|
|
|
|
}
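The D-Bus rework commit message above notes that method dispatch is now driven by static descriptor instances of type GDBusInterfaceInfo. As a hedged illustration using plain GIO structures (not NetworkManager's actual descriptor macros), the Reload(u) method served by impl_manager_reload() could be described like this:

/* static descriptor for a "Reload" method taking a single "u" (flags) argument;
 * a ref_count of -1 marks the structures as statically allocated */
static GDBusArgInfo arg_reload_flags = { -1, "flags", "u", NULL };
static GDBusArgInfo *reload_in_args[] = { &arg_reload_flags, NULL };

static GDBusMethodInfo method_reload = { -1, "Reload", reload_in_args, NULL, NULL };
static GDBusMethodInfo *manager_methods[] = { &method_reload, NULL };

static GDBusInterfaceInfo manager_interface_info = {
    -1,
    "org.freedesktop.NetworkManager",
    manager_methods,
    NULL, /* signals */
    NULL, /* properties */
    NULL, /* annotations */
};

A client can then exercise the method over the bus, for instance with the same kind of busctl invocation quoted in the commit message, passing the "u" flags argument.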
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-01-30 19:52:53 +01:00
|
|
|
|
2016-07-01 12:11:01 +02:00
|
|
|
NMDevice *
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
nm_manager_get_device_by_path (NMManager *self, const char *path)
|
2011-01-10 23:39:12 -06:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *device;
|
2011-01-10 23:39:12 -06:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
g_return_val_if_fail (path, NULL);
|
2011-03-15 17:04:35 -05:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
device = nm_dbus_manager_lookup_object (nm_dbus_object_get_manager (NM_DBUS_OBJECT (self)),
|
|
|
|
|
path);
|
2018-03-23 21:49:41 +01:00
|
|
|
if ( !device
|
|
|
|
|
|| !NM_IS_DEVICE (device)
|
|
|
|
|
|| c_list_is_empty (&device->devices_lst))
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
nm_assert (c_list_contains (&priv->devices_lst_head, &device->devices_lst));
|
|
|
|
|
return device;
|
2011-01-10 23:39:12 -06:00
|
|
|
}
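The embedded-CList tracking described in the commit message above, and relied upon by the lookup just shown (devices_lst / devices_lst_head), can be sketched in isolation. Everything below is a self-contained, hypothetical example against the bundled c-list API; struct Device and the helpers are not NetworkManager's real types:

#include <stdlib.h>
#include "c-list.h"

struct Device {
    CList devices_lst;   /* link node embedded directly in the tracked object */
    int   ifindex;
};

static CList devices_lst_head = C_LIST_INIT (devices_lst_head);

static struct Device *
device_new (int ifindex)
{
    struct Device *dev = calloc (1, sizeof (*dev));

    c_list_init (&dev->devices_lst);   /* an unlinked node points to itself */
    dev->ifindex = ifindex;
    c_list_link_tail (&devices_lst_head, &dev->devices_lst);
    return dev;
}

static struct Device *
device_find (int ifindex)
{
    struct Device *dev;

    c_list_for_each_entry (dev, &devices_lst_head, devices_lst) {
        if (dev->ifindex == ifindex)
            return dev;
    }
    return NULL;
}

static void
device_free (struct Device *dev)
{
    /* O(1) membership test and O(1) unlink, no access to the list head needed */
    if (!c_list_is_empty (&dev->devices_lst))
        c_list_unlink (&dev->devices_lst);
    free (dev);
}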
|
|
|
|
|
|
2013-05-03 13:55:51 -04:00
|
|
|
NMDevice *
|
2018-03-23 21:51:07 +01:00
|
|
|
nm_manager_get_device_by_ifindex (NMManager *self, int ifindex)
|
2013-05-03 13:55:51 -04:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *device;
|
2013-05-03 13:55:51 -04:00
|
|
|
|
2018-06-22 15:47:41 +02:00
|
|
|
if (ifindex > 0) {
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_device_get_ifindex (device) == ifindex)
|
|
|
|
|
return device;
|
|
|
|
|
}
|
2013-05-03 13:55:51 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2015-04-29 10:56:36 +02:00
|
|
|
static NMDevice *
|
2018-03-23 21:51:07 +01:00
|
|
|
find_device_by_permanent_hw_addr (NMManager *self, const char *hwaddr)
|
2015-04-29 10:56:36 +02:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *device;
|
2014-09-18 17:50:47 -05:00
|
|
|
const char *device_addr;
|
2018-03-24 11:31:29 +01:00
|
|
|
guint8 hwaddr_bin[NM_UTILS_HWADDR_LEN_MAX];
|
|
|
|
|
gsize hwaddr_len;
|
2015-04-29 10:56:36 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
g_return_val_if_fail (hwaddr != NULL, NULL);
|
2015-04-29 10:56:36 +02:00
|
|
|
|
2018-03-24 11:31:29 +01:00
|
|
|
if (!_nm_utils_hwaddr_aton (hwaddr, hwaddr_bin, sizeof (hwaddr_bin), &hwaddr_len))
|
2018-03-23 21:51:07 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
device_addr = nm_device_get_permanent_hw_address (device);
|
|
|
|
|
if ( device_addr
|
2018-03-24 11:31:29 +01:00
|
|
|
&& nm_utils_hwaddr_matches (hwaddr_bin, hwaddr_len, device_addr, -1))
|
2018-03-23 21:51:07 +01:00
|
|
|
return device;
|
2015-04-29 10:56:36 +02:00
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
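A hedged usage note on the comparison above: nm_utils_hwaddr_matches() accepts either binary buffers with an explicit length or ASCII hardware addresses with a length of -1, which is why the parsed binary form can be compared directly against the device's string form. Illustrative values only:

guint8 buf[NM_UTILS_HWADDR_LEN_MAX];
gsize len;

if (_nm_utils_hwaddr_aton ("00:11:22:33:44:55", buf, sizeof (buf), &len)) {
    /* both calls are expected to match, regardless of the separator style */
    gboolean same_bin = nm_utils_hwaddr_matches (buf, len, "00-11-22-33-44-55", -1);
    gboolean same_str = nm_utils_hwaddr_matches ("00:11:22:33:44:55", -1, "00-11-22-33-44-55", -1);

    (void) same_bin;
    (void) same_str;
}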
|
|
|
|
|
|
|
|
|
|
static NMDevice *
|
all: don't use gchar/gshort/gint/glong but C types
We commonly don't use the glib typedefs for char/short/int/long,
but their C types directly.
$ git grep '\<g\(char\|short\|int\|long\|float\|double\)\>' | wc -l
587
$ git grep '\<\(char\|short\|int\|long\|float\|double\)\>' | wc -l
21114
One could argue that using the glib typedefs is preferable in
public API (of our glib based libnm library) or where it clearly
is related to glib, like during
g_object_set (obj, PROPERTY, (gint) value, NULL);
However, that argument does not seem strong, because in practice we don't
follow that argument today, and seldom use the glib typedefs.
Also, the style guide for this would be hard to formalize, because
"using them where clearly related to a glib" is a very loose suggestion.
Also note that glib typedefs will always just be typedefs of the
underlying C types. There is no danger of glib changing the meaning
of these typedefs (because that would be a major API break of glib).
A simple style guide is instead: don't use these typedefs.
No manual actions, I only ran the bash script:
FILES=($(git ls-files '*.[hc]'))
sed -i \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>\( [^ ]\)/\1\2/g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\> /\1 /g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>/\1/g' \
"${FILES[@]}"
2018-07-11 07:40:19 +02:00
|
|
|
find_device_by_ip_iface (NMManager *self, const char *iface)
|
2015-04-29 10:56:36 +02:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *device;
|
2015-04-29 10:56:36 +02:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
g_return_val_if_fail (iface, NULL);
|
2014-09-24 16:58:07 -05:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if ( nm_device_is_real (device)
|
|
|
|
|
&& nm_streq0 (nm_device_get_ip_iface (device), iface))
|
|
|
|
|
return device;
|
2015-04-29 10:56:36 +02:00
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/**
|
|
|
|
|
* find_device_by_iface:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @iface: the device interface to find
|
|
|
|
|
* @connection: a connection to ensure the returned device is compatible with
|
|
|
|
|
* @slave: a slave connection to ensure a master is compatible with
|
|
|
|
|
*
|
|
|
|
|
* Finds a device by interface name, preferring realized devices. If @slave
|
|
|
|
|
* is given, this function will only return master devices and will ensure
|
|
|
|
|
* @slave, when activated, can be a slave of the returned master device. If
|
|
|
|
|
* @connection is given, this function will only consider devices that are
|
|
|
|
|
* compatible with @connection.
|
|
|
|
|
*
|
|
|
|
|
* Returns: the matching #NMDevice, or %NULL if no compatible device is found
|
|
|
|
|
*/
|
2014-09-24 14:57:14 -05:00
|
|
|
static NMDevice *
|
2014-10-15 21:17:45 -05:00
|
|
|
find_device_by_iface (NMManager *self,
|
|
|
|
|
const char *iface,
|
|
|
|
|
NMConnection *connection,
|
|
|
|
|
NMConnection *slave)
|
2014-09-24 14:57:14 -05:00
|
|
|
{
|
2014-10-15 21:17:45 -05:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *fallback = NULL;
|
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *candidate;
|
2014-09-24 14:57:14 -05:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
g_return_val_if_fail (iface != NULL, NULL);
|
|
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2014-10-15 21:17:45 -05:00
|
|
|
|
|
|
|
|
if (strcmp (nm_device_get_iface (candidate), iface))
|
|
|
|
|
continue;
|
2018-06-27 17:00:55 +02:00
|
|
|
if (connection && !nm_device_check_connection_compatible (candidate, connection, NULL))
|
2014-10-15 21:17:45 -05:00
|
|
|
continue;
|
|
|
|
|
if (slave) {
|
|
|
|
|
if (!nm_device_is_master (candidate))
|
|
|
|
|
continue;
|
|
|
|
|
if (!nm_device_check_slave_connection_compatible (candidate, slave))
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (nm_device_is_real (candidate))
|
|
|
|
|
return candidate;
|
|
|
|
|
else if (!fallback)
|
|
|
|
|
fallback = candidate;
|
2014-09-24 14:57:14 -05:00
|
|
|
}
|
2014-10-15 21:17:45 -05:00
|
|
|
return fallback;
|
2014-09-24 14:57:14 -05:00
|
|
|
}
|
|
|
|
|
|
2010-05-22 08:55:30 -07:00
|
|
|
static gboolean
|
|
|
|
|
manager_sleeping (NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
|
|
|
|
if (priv->sleeping || !priv->net_enabled)
|
|
|
|
|
return TRUE;
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-15 17:55:17 +02:00
|
|
|
static const char *
|
|
|
|
|
_nm_state_to_string (NMState state)
|
2013-07-31 09:14:39 -04:00
|
|
|
{
|
|
|
|
|
switch (state) {
|
|
|
|
|
case NM_STATE_ASLEEP:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "ASLEEP";
|
2013-07-31 09:14:39 -04:00
|
|
|
case NM_STATE_DISCONNECTED:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "DISCONNECTED";
|
2013-07-31 09:14:39 -04:00
|
|
|
case NM_STATE_DISCONNECTING:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "DISCONNECTING";
|
2013-07-31 09:14:39 -04:00
|
|
|
case NM_STATE_CONNECTING:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "CONNECTING";
|
2013-07-31 09:14:39 -04:00
|
|
|
case NM_STATE_CONNECTED_LOCAL:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "CONNECTED_LOCAL";
|
2013-07-31 09:14:39 -04:00
|
|
|
case NM_STATE_CONNECTED_SITE:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "CONNECTED_SITE";
|
2013-07-31 09:14:39 -04:00
|
|
|
case NM_STATE_CONNECTED_GLOBAL:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "CONNECTED_GLOBAL";
|
2013-07-31 09:14:39 -04:00
|
|
|
case NM_STATE_UNKNOWN:
|
|
|
|
|
default:
|
2014-04-15 17:55:17 +02:00
|
|
|
return "UNKNOWN";
|
2013-07-31 09:14:39 -04:00
|
|
|
}
|
2014-04-15 17:55:17 +02:00
|
|
|
}
|
|
|
|
|
|
2013-11-08 12:23:43 -05:00
|
|
|
static NMState
|
2017-03-20 13:36:00 +00:00
|
|
|
find_best_device_state (NMManager *manager)
|
2013-11-08 12:23:43 -05:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (manager);
|
|
|
|
|
NMState best_state = NM_STATE_DISCONNECTED;
|
2017-11-23 21:30:09 +01:00
|
|
|
NMActiveConnection *ac;
|
2013-11-08 12:23:43 -05:00
|
|
|
|
2017-11-23 21:30:09 +01:00
|
|
|
c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
|
2013-11-08 12:23:43 -05:00
|
|
|
NMActiveConnectionState ac_state = nm_active_connection_get_state (ac);
|
|
|
|
|
|
|
|
|
|
switch (ac_state) {
|
|
|
|
|
case NM_ACTIVE_CONNECTION_STATE_ACTIVATED:
|
2017-11-23 21:53:04 +01:00
|
|
|
if (nm_active_connection_get_default (ac, AF_UNSPEC)) {
|
2017-10-20 10:37:11 +02:00
|
|
|
if (priv->connectivity_state == NM_CONNECTIVITY_FULL)
|
2013-11-08 12:23:43 -05:00
|
|
|
return NM_STATE_CONNECTED_GLOBAL;
|
|
|
|
|
|
2015-01-13 14:48:29 -05:00
|
|
|
best_state = NM_STATE_CONNECTED_SITE;
|
2013-11-08 12:23:43 -05:00
|
|
|
} else {
|
|
|
|
|
if (best_state < NM_STATE_CONNECTING)
|
|
|
|
|
best_state = NM_STATE_CONNECTED_LOCAL;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case NM_ACTIVE_CONNECTION_STATE_ACTIVATING:
|
2017-03-13 15:34:14 +01:00
|
|
|
if (!NM_IN_SET (nm_active_connection_get_activation_type (ac),
|
|
|
|
|
NM_ACTIVATION_TYPE_EXTERNAL,
|
|
|
|
|
NM_ACTIVATION_TYPE_ASSUME)) {
|
2013-11-08 12:23:43 -05:00
|
|
|
if (best_state != NM_STATE_CONNECTED_GLOBAL)
|
|
|
|
|
best_state = NM_STATE_CONNECTING;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case NM_ACTIVE_CONNECTION_STATE_DEACTIVATING:
|
2017-03-13 15:34:14 +01:00
|
|
|
if (!NM_IN_SET (nm_active_connection_get_activation_type (ac),
|
|
|
|
|
NM_ACTIVATION_TYPE_EXTERNAL,
|
|
|
|
|
NM_ACTIVATION_TYPE_ASSUME)) {
|
2013-11-08 12:23:43 -05:00
|
|
|
if (best_state < NM_STATE_DISCONNECTING)
|
|
|
|
|
best_state = NM_STATE_DISCONNECTING;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return best_state;
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-03 09:15:24 +02:00
|
|
|
static void
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_manager_update_metered (NMManager *self)
|
2015-06-03 09:15:24 +02:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv;
|
|
|
|
|
NMDevice *device;
|
|
|
|
|
NMMetered value = NM_METERED_UNKNOWN;
|
|
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
g_return_if_fail (NM_IS_MANAGER (self));
|
|
|
|
|
priv = NM_MANAGER_GET_PRIVATE (self);
|
2015-06-03 09:15:24 +02:00
|
|
|
|
|
|
|
|
if (priv->primary_connection) {
|
|
|
|
|
device = nm_active_connection_get_device (priv->primary_connection);
|
|
|
|
|
if (device)
|
|
|
|
|
value = nm_device_get_metered (device);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (value != priv->metered) {
|
|
|
|
|
priv->metered = value;
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "new metered value: %d", (int) priv->metered);
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify (self, PROP_METERED);
|
2015-06-03 09:15:24 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-29 10:58:59 +01:00
|
|
|
NMMetered
|
|
|
|
|
nm_manager_get_metered (NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
g_return_val_if_fail (NM_IS_MANAGER (self), NM_METERED_UNKNOWN);
|
|
|
|
|
|
|
|
|
|
return NM_MANAGER_GET_PRIVATE (self)->metered;
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-20 12:02:18 -04:00
|
|
|
static void
|
2018-04-11 11:44:47 +02:00
|
|
|
nm_manager_update_state (NMManager *self)
|
2009-05-20 12:02:18 -04:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv;
|
|
|
|
|
NMState new_state = NM_STATE_DISCONNECTED;
|
|
|
|
|
|
2018-04-11 11:44:47 +02:00
|
|
|
g_return_if_fail (NM_IS_MANAGER (self));
|
2009-05-20 12:02:18 -04:00
|
|
|
|
2018-04-11 11:44:47 +02:00
|
|
|
priv = NM_MANAGER_GET_PRIVATE (self);
|
2009-05-20 12:02:18 -04:00
|
|
|
|
2018-04-11 11:44:47 +02:00
|
|
|
if (manager_sleeping (self))
|
2009-05-20 12:02:18 -04:00
|
|
|
new_state = NM_STATE_ASLEEP;
|
2013-11-08 12:23:43 -05:00
|
|
|
else
|
2018-04-11 11:44:47 +02:00
|
|
|
new_state = find_best_device_state (self);
|
2017-02-16 10:07:10 +01:00
|
|
|
|
2017-03-20 13:36:00 +00:00
|
|
|
if ( new_state >= NM_STATE_CONNECTED_LOCAL
|
|
|
|
|
&& priv->connectivity_state == NM_CONNECTIVITY_FULL) {
|
|
|
|
|
new_state = NM_STATE_CONNECTED_GLOBAL;
|
2015-01-13 15:35:10 -05:00
|
|
|
}
|
2013-07-31 09:14:39 -04:00
|
|
|
|
2018-04-11 11:44:47 +02:00
|
|
|
if (priv->state == new_state)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
priv->state = new_state;
|
|
|
|
|
|
|
|
|
|
_LOGI (LOGD_CORE, "NetworkManager state is now %s", _nm_state_to_string (new_state));
|
|
|
|
|
|
|
|
|
|
_notify (self, PROP_STATE);
|
|
|
|
|
nm_dbus_object_emit_signal (NM_DBUS_OBJECT (self),
|
|
|
|
|
&interface_info_manager,
|
|
|
|
|
&signal_info_state_changed,
|
|
|
|
|
"(u)",
|
|
|
|
|
(guint32) priv->state);
|
2009-05-20 12:02:18 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
manager_device_state_changed (NMDevice *device,
|
|
|
|
|
NMDeviceState new_state,
|
|
|
|
|
NMDeviceState old_state,
|
|
|
|
|
NMDeviceStateReason reason,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
2011-12-14 13:41:57 -06:00
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
2015-12-13 22:08:23 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2009-05-20 12:02:18 -04:00
|
|
|
|
2018-03-19 14:15:56 +01:00
|
|
|
if ( old_state == NM_DEVICE_STATE_UNMANAGED
|
|
|
|
|
&& new_state > NM_DEVICE_STATE_UNMANAGED)
|
|
|
|
|
retry_connections_for_parent_device (self, device);
|
|
|
|
|
|
2018-07-06 21:44:06 +02:00
|
|
|
if (NM_IN_SET (new_state,
|
|
|
|
|
NM_DEVICE_STATE_UNMANAGED,
|
|
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_DISCONNECTED,
|
|
|
|
|
NM_DEVICE_STATE_PREPARE,
|
|
|
|
|
NM_DEVICE_STATE_FAILED))
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify (self, PROP_ACTIVE_CONNECTIONS);
|
2015-12-13 22:08:23 +01:00
|
|
|
|
2018-07-06 21:35:04 +02:00
|
|
|
if (NM_IN_SET (new_state,
|
|
|
|
|
NM_DEVICE_STATE_UNMANAGED,
|
|
|
|
|
NM_DEVICE_STATE_DISCONNECTED,
|
|
|
|
|
NM_DEVICE_STATE_ACTIVATED))
|
|
|
|
|
nm_manager_write_device_state (self, device);
|
|
|
|
|
|
2018-07-06 21:44:06 +02:00
|
|
|
if (NM_IN_SET (new_state,
|
|
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_DISCONNECTED))
|
2015-12-13 22:08:23 +01:00
|
|
|
nm_settings_device_added (priv->settings, device);
|
2009-05-20 12:02:18 -04:00
|
|
|
}
|
|
|
|
|
|
2013-08-13 17:45:34 -04:00
|
|
|
static void device_has_pending_action_changed (NMDevice *device,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self);
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
check_if_startup_complete (NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *device;
|
2018-09-19 17:22:50 +02:00
|
|
|
const char *reason;
|
2013-08-13 17:45:34 -04:00
|
|
|
|
|
|
|
|
if (!priv->startup)
|
|
|
|
|
return;
|
|
|
|
|
|
2015-08-25 13:32:53 +02:00
|
|
|
if (!priv->devices_inited)
|
|
|
|
|
return;
|
|
|
|
|
|
2018-09-19 17:22:50 +02:00
|
|
|
reason = nm_settings_get_startup_complete_blocked_reason (priv->settings);
|
|
|
|
|
if (reason) {
|
|
|
|
|
_LOGD (LOGD_CORE, "startup complete is waiting for connection (%s)",
|
|
|
|
|
reason);
|
2014-12-18 16:04:07 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2018-09-19 17:22:50 +02:00
|
|
|
reason = nm_device_has_pending_action_reason (device);
|
|
|
|
|
if (reason) {
|
|
|
|
|
_LOGD (LOGD_CORE, "startup complete is waiting for device '%s' (%s)",
|
|
|
|
|
nm_device_get_iface (device),
|
|
|
|
|
reason);
|
2013-08-13 17:45:34 -04:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGI (LOGD_CORE, "startup complete");
|
2013-08-13 17:45:34 -04:00
|
|
|
|
|
|
|
|
priv->startup = FALSE;
|
|
|
|
|
|
2017-03-14 17:39:26 +01:00
|
|
|
/* we no longer care about these signals. Startup-complete only
|
|
|
|
|
* happens once. */
|
|
|
|
|
g_signal_handlers_disconnect_by_func (priv->settings, G_CALLBACK (settings_startup_complete_changed), self);
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func (device,
|
2017-03-14 17:39:26 +01:00
|
|
|
G_CALLBACK (device_has_pending_action_changed),
|
|
|
|
|
self);
|
2013-08-13 17:45:34 -04:00
|
|
|
}
|
2014-10-29 09:12:18 -05:00
|
|
|
|
2017-03-14 17:39:26 +01:00
|
|
|
_notify (self, PROP_STARTUP);
|
|
|
|
|
|
2016-07-01 14:30:00 +02:00
|
|
|
if (nm_config_get_configure_and_quit (priv->config))
|
2014-10-29 09:12:18 -05:00
|
|
|
g_signal_emit (self, signals[CONFIGURE_QUIT], 0);
|
2013-08-13 17:45:34 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
device_has_pending_action_changed (NMDevice *device,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
check_if_startup_complete (self);
|
|
|
|
|
}
|
|
|
|
|
|
2014-12-18 16:04:07 -05:00
|
|
|
static void
|
|
|
|
|
settings_startup_complete_changed (NMSettings *settings,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
check_if_startup_complete (self);
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-26 11:12:39 +01:00
|
|
|
static void
|
|
|
|
|
_parent_notify_changed (NMManager *self,
|
|
|
|
|
NMDevice *device,
|
|
|
|
|
gboolean device_removed)
|
|
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *candidate;
|
2016-12-26 11:12:39 +01:00
|
|
|
|
|
|
|
|
nm_assert (NM_IS_DEVICE (device));
|
|
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
again:
|
|
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_device_parent_notify_changed (candidate, device, device_removed)) {
|
2016-12-26 11:12:39 +01:00
|
|
|
/* in the unlikely event that this changes anything, we start iterating
|
|
|
|
|
* again, to be sure that the device list is up-to-date. */
|
2018-03-23 21:51:07 +01:00
|
|
|
goto again;
|
|
|
|
|
}
|
2016-12-26 11:12:39 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-25 17:46:33 +02:00
|
|
|
static gboolean
|
|
|
|
|
device_is_wake_on_lan (NMPlatform *platform, NMDevice *device)
|
|
|
|
|
{
|
core: avoid assertion when removing devices
remove_device() is also called when the device no longer has a valid
ifindex, and so device_is_wake_on_lan() must do an extra check to avoid
the following assertion failure:
nmp_cache_lookup_entry_link: assertion 'ifindex > 0' failed
0 _g_log_abort () from target:/lib64/libglib-2.0.so.0
1 g_logv () from target:/lib64/libglib-2.0.so.0
2 g_log () from target:/lib64/libglib-2.0.so.0
3 nmp_cache_lookup_entry_link (cache=0xb858f0, ifindex=ifindex@entry=0) at ../src/platform/nmp-object.c:1713
4 nmp_cache_lookup_link (cache=<optimized out>, ifindex=ifindex@entry=0) at ../src/platform/nmp-object.c:1728
5 nm_platform_link_get_obj (self=self@entry=0xb85840, ifindex=ifindex@entry=0, visible_only=visible_only@entry=1) at ../src/platform/nm-platform.c:759
6 nm_platform_link_get (self=self@entry=0xb85840, ifindex=ifindex@entry=0) at ../src/platform/nm-platform.c:784
7 nm_platform_link_get_type (self=self@entry=0xb85840, ifindex=ifindex@entry=0) at ../src/platform/nm-platform.c:1065
8 link_get_wake_on_lan (platform=0xb85840, ifindex=0) at ../src/platform/nm-linux-platform.c:6963
9 nm_platform_link_get_wake_on_lan (self=self@entry=0xb85840, ifindex=0) at ../src/platform/nm-platform.c:1705
10 device_is_wake_on_lan (platform=0xb85840, device=<optimized out>) at ../src/nm-manager.c:1617
11 remove_device (self=0xbd0060, device=<optimized out>, device@entry=0xd298c0, quitting=quitting@entry=0, allow_unmanage=allow_unmanage@entry=1)
12 device_removed_cb (device=0xd298c0, user_data=0xbd0060) at ../src/nm-manager.c:1698
13 _g_closure_invoke_va () from target:/lib64/libgobject-2.0.so.0
14 g_signal_emit_valist () from target:/lib64/libgobject-2.0.so.0
15 g_signal_emit () from target:/lib64/libgobject-2.0.so.0
16 available_connections_check_delete_unrealized_on_idle (user_data=0xd298c0) at ../src/devices/nm-device.c:4446
Fixes: ca3bbede746a7d7031ba6a011c69ad7adb1dca3e
2018-12-04 19:05:27 +01:00
|
|
|
int ifindex;
|
|
|
|
|
|
|
|
|
|
ifindex = nm_device_get_ip_ifindex (device);
|
|
|
|
|
if (ifindex <= 0)
|
|
|
|
|
return FALSE;
|
|
|
|
|
return nm_platform_link_get_wake_on_lan (platform, ifindex);
|
2018-05-25 17:46:33 +02:00
|
|
|
}
|
|
|
|
|
|
2013-08-15 12:58:22 -04:00
|
|
|
static void
|
2016-03-02 11:38:26 +01:00
|
|
|
remove_device (NMManager *self,
|
2014-10-13 11:58:26 -05:00
|
|
|
NMDevice *device,
|
|
|
|
|
gboolean quitting,
|
|
|
|
|
gboolean allow_unmanage)
|
2009-05-20 12:02:18 -04:00
|
|
|
{
|
2016-03-02 11:38:26 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2016-04-20 17:46:41 +02:00
|
|
|
gboolean unmanage = FALSE;
|
2009-07-09 11:06:31 -04:00
|
|
|
|
2018-05-25 17:46:33 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "removing device (allow_unmanage %d, managed %d, wol %d)",
|
|
|
|
|
allow_unmanage, nm_device_get_managed (device, FALSE),
|
|
|
|
|
device_is_wake_on_lan (priv->platform, device));
|
2014-12-18 10:23:06 -06:00
|
|
|
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and the managed property.
Previously, NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device was considered
managed depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is briefly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends not only on the flags that are set, but also
on the flags that are explicitly unset.
In a way, this is similar to before, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
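The following is a rough, simplified sketch of that idea, not the actual NMDevice logic: the flag names and the DeviceSketch struct are invented for illustration. It shows "managed" being derived from flags alone, with a non-authoritative flag (EXTERNAL_DOWN here) rendered ineffective once the user explicitly manages the device.

#include <stdbool.h>
#include <stdio.h>

/* Invented, simplified flags -- the real set lives in NMUnmanagedFlags. */
typedef enum {
    UNMANAGED_SLEEPING      = 1 << 0,
    UNMANAGED_PLATFORM_INIT = 1 << 1,
    UNMANAGED_EXTERNAL_DOWN = 1 << 2,   /* non-authoritative */
    UNMANAGED_USER_EXPLICIT = 1 << 3,
} UnmanagedFlags;

typedef struct {
    unsigned unmanaged_flags;           /* flags currently set */
    bool     user_explicitly_managed;   /* user explicitly asked to manage the device */
} DeviceSketch;

/* Managed is derived from the flags alone -- no device state involved. */
static bool
device_is_managed (const DeviceSketch *dev)
{
    unsigned effective = dev->unmanaged_flags;

    /* A non-authoritative flag is rendered ineffective by the explicit
     * user decision: flags that are explicitly unset matter too. */
    if (dev->user_explicitly_managed)
        effective &= ~UNMANAGED_EXTERNAL_DOWN;

    return effective == 0;
}

int
main (void)
{
    DeviceSketch dev = { .unmanaged_flags = UNMANAGED_EXTERNAL_DOWN };

    printf ("managed: %d\n", device_is_managed (&dev));   /* 0: unmanaged */
    dev.user_explicitly_managed = true;
    printf ("managed: %d\n", device_is_managed (&dev));   /* 1: managed   */
    return 0;
}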
|
|
|
if (allow_unmanage && nm_device_get_managed (device, FALSE)) {
|
2013-11-08 15:07:20 -06:00
|
|
|
|
2018-05-25 17:46:33 +02:00
|
|
|
if (quitting) {
|
|
|
|
|
/* Leave the device configured on quit if Wake-on-LAN/WoWLAN is enabled */
|
|
|
|
|
if (device_is_wake_on_lan (priv->platform, device))
|
|
|
|
|
unmanage = FALSE;
|
|
|
|
|
else
|
|
|
|
|
unmanage = nm_device_unmanage_on_quit (device);
|
|
|
|
|
} else {
|
2016-04-01 11:04:34 +02:00
|
|
|
/* the device is already gone. Unmanage it. */
|
2016-06-21 11:04:38 +02:00
|
|
|
unmanage = TRUE;
|
2016-04-01 11:04:34 +02:00
|
|
|
}
|
core: fix deactivation of assumed connections on device removal (bgo #729833)
The following procedure leaves an NMActiveConnection around for a deactivated
device, which causes errors in libnm-glib clients when they cannot create the
GObject for the non-existent device of the AC.
1) allow a device which can assume connections to be activated
2) stop NM, which should leave the device's IP configuration up
3) start NM and allow it to assume the device's existing connection
4) remove the device, either by unplugging it or 'rmmod'
The device is removed by nm-manager.c::remove_device(), but the device object
is not moved to UNMANAGED state, leaving the NMActiveConnection completely
unaware the device has gone away.
The nm-manager.c::remove_device() code did not correctly handle moving a
forcibly removed (eg, by unplugging or 'ip link del' or 'rmmod') device to
the UNMANAGED state when the device was active with an assumed connection.
To fix this, make the conditions when the device should be deactivated
on removal much more explicit.
A device should be deactivated on removal if:
1) it is forcibly removed, eg by the kernel network interface being
removed due to 'ip link del' or hotplugging, or internally by NM due
to a parent WWAN interface taking priority over a WWAN ethernet interface
2) if the device cannot assume connections, in which case NetworkManager
must have activated the device and since we cannot assume the connection
on restart, we should deactivate it
3) if the device is not activated, to ensure that its IPv6 parameters
and other things get reset to the pre-NetworkManager values
https://bugzilla.gnome.org/show_bug.cgi?id=729833
2014-05-08 14:32:02 -05:00
|
|
|
|
2014-05-23 18:25:05 -05:00
|
|
|
if (unmanage) {
|
|
|
|
|
if (quitting)
|
2016-01-13 12:03:47 +01:00
|
|
|
nm_device_set_unmanaged_by_quitting (device);
|
2017-03-13 15:34:14 +01:00
|
|
|
else {
|
|
|
|
|
nm_device_sys_iface_state_set (device, NM_DEVICE_SYS_IFACE_STATE_REMOVED);
|
2015-09-15 15:35:16 +02:00
|
|
|
nm_device_set_unmanaged_by_flags (device, NM_UNMANAGED_PLATFORM_INIT, TRUE, NM_DEVICE_STATE_REASON_REMOVED);
|
2017-03-13 15:34:14 +01:00
|
|
|
}
|
2018-01-30 18:01:18 +01:00
|
|
|
} else if (quitting && nm_config_get_configure_and_quit (priv->config) == NM_CONFIG_CONFIGURE_AND_QUIT_ENABLED) {
|
2014-10-29 09:12:18 -05:00
|
|
|
nm_device_spawn_iface_helper (device);
|
2014-05-23 18:25:05 -05:00
|
|
|
}
|
2009-08-03 17:15:03 -04:00
|
|
|
}
|
2009-05-20 12:02:18 -04:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
g_signal_handlers_disconnect_matched (device, G_SIGNAL_MATCH_DATA, 0, 0, NULL, NULL, self);
|
2009-05-20 12:02:18 -04:00
|
|
|
|
2013-11-14 11:33:12 -06:00
|
|
|
nm_settings_device_removed (priv->settings, device, quitting);
|
2018-03-23 21:51:07 +01:00
|
|
|
|
|
|
|
|
c_list_unlink (&device->devices_lst);
|
2014-03-27 12:16:46 -04:00
|
|
|
|
2016-12-26 11:12:39 +01:00
|
|
|
_parent_notify_changed (self, device, TRUE);
|
|
|
|
|
|
2014-10-06 11:21:54 -05:00
|
|
|
if (nm_device_is_real (device)) {
|
2016-04-20 17:46:41 +02:00
|
|
|
gboolean unconfigure_ip_config = !quitting || unmanage;
|
|
|
|
|
|
|
|
|
|
/* When we don't unmanage the device on shutdown, we want to preserve the DNS
|
|
|
|
|
* configuration in resolv.conf. For that, we must leak the configuration
|
|
|
|
|
* in NMPolicy/NMDnsManager. We do that by emitting the device-removed signal
|
|
|
|
|
* with the device's ip-config object still uncleared. In that case, NMPolicy
|
|
|
|
|
* never learns to unconfigure the ip-config objects and does not remove them
|
|
|
|
|
* from DNS on shutdown (which is ugly, because we don't cleanup the memory
|
|
|
|
|
* properly).
|
|
|
|
|
*
|
|
|
|
|
* Control that by passing @unconfigure_ip_config. */
|
|
|
|
|
nm_device_removed (device, unconfigure_ip_config);
|
2016-04-04 16:58:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example by polling whether the
system bus appears (as was done previously). Also, restarting the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in and idle handle, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we emit now more PropertiesChanged signals then before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
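As a standalone illustration of the lower-layer approach described above, the sketch below registers a single object with a static interface description directly on a GDBusConnection, with no generated skeletons and no GDBusObjectManagerServer in between. The bus name, object path and interface XML are made up for the example and are not NetworkManager's actual D-Bus API.

#include <gio/gio.h>

static GDBusNodeInfo *introspection_data;

/* Hypothetical interface; NetworkManager instead keeps static
 * GDBusInterfaceInfo descriptors for its real interfaces. */
static const char introspection_xml[] =
    "<node>"
    "  <interface name='org.example.Demo'>"
    "    <method name='Ping'>"
    "      <arg type='s' name='reply' direction='out'/>"
    "    </method>"
    "  </interface>"
    "</node>";

static void
method_call (GDBusConnection *connection,
             const char *sender,
             const char *object_path,
             const char *interface_name,
             const char *method_name,
             GVariant *parameters,
             GDBusMethodInvocation *invocation,
             gpointer user_data)
{
    if (g_strcmp0 (method_name, "Ping") == 0)
        g_dbus_method_invocation_return_value (invocation, g_variant_new ("(s)", "pong"));
}

static const GDBusInterfaceVTable vtable = {
    .method_call = method_call,
};

static void
on_bus_acquired (GDBusConnection *connection, const char *name, gpointer user_data)
{
    /* The object is glued directly to the connection. */
    g_dbus_connection_register_object (connection,
                                       "/org/example/Demo",
                                       introspection_data->interfaces[0],
                                       &vtable,
                                       NULL, NULL, NULL);
}

int
main (void)
{
    GMainLoop *loop = g_main_loop_new (NULL, FALSE);

    introspection_data = g_dbus_node_info_new_for_xml (introspection_xml, NULL);
    g_bus_own_name (G_BUS_TYPE_SESSION, "org.example.Demo",
                    G_BUS_NAME_OWNER_FLAGS_NONE,
                    on_bus_acquired, NULL, NULL, NULL, NULL);
    g_main_loop_run (loop);
    g_main_loop_unref (loop);
    g_dbus_node_info_unref (introspection_data);
    return 0;
}

A registered object like this answers method calls directly on the connection, which is roughly the shape the rework gives to NMDBusObject/NMDBusManager.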
|
|
|
_emit_device_added_removed (self, device, FALSE);
|
2017-03-17 23:30:43 +01:00
|
|
|
} else {
|
|
|
|
|
/* unrealize() does not release a slave device from its master or
|
|
|
|
|
* clear IP configurations; do it here */
|
|
|
|
|
nm_device_removed (device, TRUE);
|
2014-10-06 11:21:54 -05:00
|
|
|
}
|
2017-03-17 23:30:43 +01:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
g_signal_emit (self, signals[INTERNAL_DEVICE_REMOVED], 0, device);
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify (self, PROP_ALL_DEVICES);
|
2014-04-09 12:31:08 -05:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_clear_and_unexport (&device);
|
2009-05-20 12:02:18 -04:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
check_if_startup_complete (self);
|
2009-05-20 12:02:18 -04:00
|
|
|
}
|
|
|
|
|
|
2014-02-10 08:49:47 -06:00
|
|
|
static void
|
|
|
|
|
device_removed_cb (NMDevice *device, gpointer user_data)
|
|
|
|
|
{
|
2014-10-13 11:58:26 -05:00
|
|
|
remove_device (NM_MANAGER (user_data), device, FALSE, TRUE);
|
2014-02-10 08:49:47 -06:00
|
|
|
}
|
|
|
|
|
|
2007-09-25 16:47:53 +00:00
|
|
|
NMState
|
|
|
|
|
nm_manager_get_state (NMManager *manager)
|
|
|
|
|
{
|
|
|
|
|
g_return_val_if_fail (NM_IS_MANAGER (manager), NM_STATE_UNKNOWN);
|
|
|
|
|
|
|
|
|
|
return NM_MANAGER_GET_PRIVATE (manager)->state;
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2009-06-11 00:39:12 -04:00
|
|
|
|
all: standardize on NMSettingWired:mac-address for all VLANs
Currently, ethernet-based VLANs can specify the hardware address of
the parent device (and, in theory, the cloned hardware address and MTU
of the VLAN device) by using an NMSettingWired in addition to the
NMSettingVlan.
The theory was that non-ethernet-based VLANs, when we eventually
supported them, would likewise use the setting type corresponding to
their parent device. However, this turns out to be both complicated
(the settings plugins and connection editor would have a
hard-to-impossible time figuring out which setting type to use in some
cases) and incorrect (for most L2 settings [eg, BSSID, bond mode,
etc], the VLAN can't have its own values separate from the parent
device).
What we should have done was just have :mac-address,
:cloned-mac-address, and :mtu properties on NMSettingVlan. However, at
this point, for backward-compatibility, we will just stick with using
a combination of NMSettingVlan and NMSettingWired, but we will use
NMSettingWired regardless of the underlying hardware type.
2013-09-09 09:40:40 -04:00
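For illustration, here is a hedged client-side sketch (libnm) of the resulting convention: the VLAN's parent may be identified by the MAC address of the parent device, carried in an NMSettingWired alongside the NMSettingVlan. The connection name, VLAN id and MAC value below are made up for the example.

#include <NetworkManager.h>

/* Build a VLAN connection whose parent is identified by MAC address via
 * NMSettingWired:mac-address (values are made up for the example). */
static NMConnection *
build_vlan_connection (void)
{
    NMConnection *connection = nm_simple_connection_new ();
    NMSetting *s_con = nm_setting_connection_new ();
    NMSetting *s_vlan = nm_setting_vlan_new ();
    NMSetting *s_wired = nm_setting_wired_new ();
    char *uuid = nm_utils_uuid_generate ();

    g_object_set (s_con,
                  NM_SETTING_CONNECTION_ID, "example-vlan",
                  NM_SETTING_CONNECTION_UUID, uuid,
                  NM_SETTING_CONNECTION_TYPE, NM_SETTING_VLAN_SETTING_NAME,
                  NULL);
    g_object_set (s_vlan,
                  NM_SETTING_VLAN_ID, (guint) 42,
                  NULL);
    /* Parent given by hardware address instead of ifname or connection UUID. */
    g_object_set (s_wired,
                  NM_SETTING_WIRED_MAC_ADDRESS, "00:53:00:11:22:33",
                  NULL);

    nm_connection_add_setting (connection, s_con);
    nm_connection_add_setting (connection, s_vlan);
    nm_connection_add_setting (connection, s_wired);

    g_free (uuid);
    return connection;
}

int
main (void)
{
    NMConnection *connection = build_vlan_connection ();

    g_object_unref (connection);
    return 0;
}

This matches what find_parent_device_for_connection() below resolves on the daemon side: the parent by interface name, by permanent hardware address, or by connection UUID.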
|
|
|
static NMDevice *
|
2016-02-17 15:18:37 +01:00
|
|
|
find_parent_device_for_connection (NMManager *self, NMConnection *connection, NMDeviceFactory *cached_factory)
|
2012-02-12 14:48:44 -06:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2014-09-18 17:50:47 -05:00
|
|
|
NMDeviceFactory *factory;
|
|
|
|
|
const char *parent_name = NULL;
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *parent_connection;
|
2014-09-18 17:50:47 -05:00
|
|
|
NMDevice *parent, *first_compatible = NULL;
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and only consumed 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we were ever to introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it. (A short standalone sketch of the
embedded-CList pattern follows the end of this function below.)
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *candidate;
|
2012-02-12 14:48:44 -06:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
g_return_val_if_fail (NM_IS_CONNECTION (connection), NULL);
|
|
|
|
|
|
2016-02-17 15:18:37 +01:00
|
|
|
if (!cached_factory) {
|
|
|
|
|
factory = nm_device_factory_manager_find_factory_for_connection (connection);
|
|
|
|
|
if (!factory)
|
|
|
|
|
return NULL;
|
|
|
|
|
} else
|
|
|
|
|
factory = cached_factory;
|
2012-02-12 14:48:44 -06:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
parent_name = nm_device_factory_get_connection_parent (factory, connection);
|
|
|
|
|
if (!parent_name)
|
|
|
|
|
return NULL;
|
2012-02-12 14:48:44 -06:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* Try as an interface name of a parent device */
|
|
|
|
|
parent = find_device_by_iface (self, parent_name, NULL, NULL);
|
2014-09-18 17:50:47 -05:00
|
|
|
if (parent)
|
|
|
|
|
return parent;
|
2012-02-12 14:48:44 -06:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
/* Maybe a hardware address */
|
2016-06-15 13:43:34 +02:00
|
|
|
parent = find_device_by_permanent_hw_addr (self, parent_name);
|
2014-09-18 17:50:47 -05:00
|
|
|
if (parent)
|
|
|
|
|
return parent;
|
|
|
|
|
|
|
|
|
|
/* Maybe a connection UUID */
|
2015-07-14 16:53:24 +02:00
|
|
|
parent_connection = nm_settings_get_connection_by_uuid (priv->settings, parent_name);
|
2014-09-18 17:50:47 -05:00
|
|
|
if (!parent_connection)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
2018-09-15 07:20:54 -04:00
|
|
|
/* Check if the parent connection is currently activated or is compatible
|
2014-09-18 17:50:47 -05:00
|
|
|
* with some known device.
|
|
|
|
|
*/
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2017-09-27 09:25:16 +02:00
|
|
|
/* Unmanaged devices are not compatible with any connection */
|
|
|
|
|
if (!nm_device_get_managed (candidate, FALSE))
|
|
|
|
|
continue;
|
|
|
|
|
|
2015-07-14 16:53:24 +02:00
|
|
|
if (nm_device_get_settings_connection (candidate) == parent_connection)
|
2014-09-18 17:50:47 -05:00
|
|
|
return candidate;
|
2013-06-10 17:32:23 -03:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
if ( !first_compatible
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
&& nm_device_check_connection_compatible (candidate,
|
|
|
|
|
nm_settings_connection_get_connection (parent_connection),
|
|
|
|
|
NULL))
|
2014-09-18 17:50:47 -05:00
|
|
|
first_compatible = candidate;
|
2013-06-10 17:32:23 -03:00
|
|
|
}
|
|
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
return first_compatible;
|
2013-06-10 17:32:23 -03:00
|
|
|
}
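The embedded-CList pattern referenced in the commit message above boils down to the following standalone sketch. Only CList and the c_list_*() helpers from the bundled c-list library are real; ExampleManager and ExampleDevice are invented illustration types, not the actual NMManager/NMDevice structures.

/* Illustrative sketch only; these are not real NetworkManager types. */
typedef struct {
	CList devices_lst;      /* link node embedded in the tracked object */
	const char *iface;
} ExampleDevice;

typedef struct {
	CList devices_lst_head; /* the manager owns only the list head */
} ExampleManager;

static void
example_manager_init (ExampleManager *mgr)
{
	c_list_init (&mgr->devices_lst_head);
}

static void
example_track (ExampleManager *mgr, ExampleDevice *dev)
{
	c_list_link_tail (&mgr->devices_lst_head, &dev->devices_lst);
}

static void
example_untrack (ExampleDevice *dev)
{
	/* O(1), and no access to the list head is needed */
	c_list_unlink (&dev->devices_lst);
}

static ExampleDevice *
example_find (ExampleManager *mgr, const char *iface)
{
	ExampleDevice *dev;

	c_list_for_each_entry (dev, &mgr->devices_lst_head, devices_lst) {
		if (nm_streq0 (dev->iface, iface))
			return dev;
	}
	return NULL;
}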
|
|
|
|
|
|
2012-02-10 13:25:39 -06:00
|
|
|
/**
|
2016-02-16 15:07:49 +01:00
|
|
|
* nm_manager_get_connection_iface:
|
2012-02-10 13:25:39 -06:00
|
|
|
* @self: the #NMManager
|
2016-02-16 15:07:49 +01:00
|
|
|
* @connection: the #NMConnection to get the interface for
|
2012-02-22 23:59:50 -06:00
|
|
|
* @out_parent: on success, the parent device if any
|
2014-09-18 17:50:47 -05:00
|
|
|
* @error: an error if determining the virtual interface name failed
|
2012-02-10 13:25:39 -06:00
|
|
|
*
|
|
|
|
|
* Given @connection, returns the interface name that the connection
|
2016-02-16 15:07:49 +01:00
|
|
|
* would need to use when activated. %NULL is returned if the name
|
|
|
|
|
* is not specified in the connection or the name for a virtual device
|
|
|
|
|
* could not be generated.
|
2012-02-10 13:25:39 -06:00
|
|
|
*
|
|
|
|
|
* Returns: the expected interface name (caller takes ownership), or %NULL
|
|
|
|
|
*/
|
2016-02-16 15:07:49 +01:00
|
|
|
char *
|
|
|
|
|
nm_manager_get_connection_iface (NMManager *self,
|
|
|
|
|
NMConnection *connection,
|
|
|
|
|
NMDevice **out_parent,
|
|
|
|
|
GError **error)
|
2012-02-10 13:25:39 -06:00
|
|
|
{
|
2014-09-18 17:50:47 -05:00
|
|
|
NMDeviceFactory *factory;
|
|
|
|
|
char *iface = NULL;
|
2012-02-22 23:59:50 -06:00
|
|
|
NMDevice *parent = NULL;
|
2012-02-12 14:48:44 -06:00
|
|
|
|
2012-02-22 23:59:50 -06:00
|
|
|
if (out_parent)
|
|
|
|
|
*out_parent = NULL;
|
2012-02-12 14:48:44 -06:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
factory = nm_device_factory_manager_find_factory_for_connection (connection);
|
|
|
|
|
if (!factory) {
|
2017-04-26 19:05:54 +02:00
|
|
|
if (nm_streq0 (nm_connection_get_connection_type (connection), NM_SETTING_GENERIC_SETTING_NAME)) {
|
|
|
|
|
/* the generic type doesn't have a factory. */
|
|
|
|
|
goto return_ifname_from_connection;
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
g_set_error (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_FAILED,
|
|
|
|
|
"NetworkManager plugin for '%s' unavailable",
|
|
|
|
|
nm_connection_get_connection_type (connection));
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2014-08-04 16:27:43 -04:00
|
|
|
|
2016-02-17 16:12:46 +01:00
|
|
|
if ( !out_parent
|
2016-10-07 16:05:43 +02:00
|
|
|
&& !NM_DEVICE_FACTORY_GET_CLASS (factory)->get_connection_iface) {
|
2016-02-17 16:12:46 +01:00
|
|
|
/* Optimization: shortcut the lookup of the parent device. */
|
2017-04-26 19:05:54 +02:00
|
|
|
goto return_ifname_from_connection;
|
2016-02-17 16:12:46 +01:00
|
|
|
}
|
|
|
|
|
|
2016-02-17 15:18:37 +01:00
|
|
|
parent = find_parent_device_for_connection (self, connection, factory);
|
2016-02-17 15:11:02 +01:00
|
|
|
iface = nm_device_factory_get_connection_iface (factory,
|
|
|
|
|
connection,
|
|
|
|
|
parent ? nm_device_get_ip_iface (parent) : NULL,
|
|
|
|
|
error);
|
2016-01-21 17:36:08 +01:00
|
|
|
if (!iface)
|
2014-09-18 17:50:47 -05:00
|
|
|
return NULL;
|
2013-06-10 17:32:23 -03:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
if (out_parent)
|
|
|
|
|
*out_parent = parent;
|
|
|
|
|
return iface;
|
2017-04-26 19:05:54 +02:00
|
|
|
|
|
|
|
|
return_ifname_from_connection:
|
|
|
|
|
iface = g_strdup (nm_connection_get_interface_name (connection));
|
|
|
|
|
if (!iface) {
|
|
|
|
|
g_set_error (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_FAILED,
|
|
|
|
|
"failed to determine interface name: error determine name for %s",
|
|
|
|
|
nm_connection_get_connection_type (connection));
|
|
|
|
|
}
|
|
|
|
|
return iface;
|
2011-10-18 13:48:44 +02:00
|
|
|
}
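As a usage illustration of the contract documented above (the returned string is owned by the caller, @out_parent is optional and may come back %NULL), a hypothetical caller inside the manager could look like this; the helper name and log messages are invented:

/* Hypothetical caller, shown only to illustrate ownership and error handling. */
static gboolean
example_log_expected_iface (NMManager *self, NMConnection *connection)
{
	gs_free char *iface = NULL;
	gs_free_error GError *error = NULL;
	NMDevice *parent = NULL;

	iface = nm_manager_get_connection_iface (self, connection, &parent, &error);
	if (!iface) {
		_LOG3D (LOGD_DEVICE, connection, "no interface name: %s", error->message);
		return FALSE;
	}

	_LOG3D (LOGD_DEVICE, connection, "would activate on %s%s%s",
	        iface,
	        parent ? " with parent " : "",
	        parent ? nm_device_get_iface (parent) : "");
	return TRUE;
}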
|
|
|
|
|
|
2016-11-28 12:32:03 +00:00
|
|
|
/**
|
|
|
|
|
* nm_manager_iface_for_uuid:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @uuid: the connection uuid
|
|
|
|
|
*
|
|
|
|
|
* Gets an interface name for the given UUID. Useful for settings plugins that
|
|
|
|
|
* wish to write configuration files compatible with tooling that can't
|
|
|
|
|
* interpret our UUIDs.
|
|
|
|
|
*
|
|
|
|
|
* Returns: An interface name; %NULL if none matches
|
|
|
|
|
*/
|
|
|
|
|
const char *
|
|
|
|
|
nm_manager_iface_for_uuid (NMManager *self, const char *uuid)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn;
|
2016-11-28 12:32:03 +00:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_settings_get_connection_by_uuid (priv->settings, uuid);
|
|
|
|
|
if (!sett_conn)
|
2016-11-28 12:32:03 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
return nm_connection_get_interface_name (nm_settings_connection_get_connection (sett_conn));
|
2016-11-28 12:32:03 +00:00
|
|
|
}
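A short, hypothetical example of the settings-plugin use case mentioned above: when writing a configuration format that cannot express UUIDs, a plugin can translate a parent reference through nm_manager_iface_for_uuid() and fall back to the UUID string if no profile matches. Only the nm_manager_iface_for_uuid() call is real API from this file; the helper is illustrative.

/* Hypothetical plugin-side helper; returns a token suitable for a config file. */
static const char *
example_parent_token_for_config (NMManager *manager, const char *parent_uuid)
{
	const char *ifname;

	ifname = nm_manager_iface_for_uuid (manager, parent_uuid);
	if (ifname)
		return ifname;      /* write the interface name into the config file */
	return parent_uuid;     /* no matching profile: keep the UUID as-is */
}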
|
|
|
|
|
|
2017-10-10 15:00:59 +02:00
|
|
|
NMDevice *
|
|
|
|
|
nm_manager_get_device (NMManager *self, const char *ifname, NMDeviceType device_type)
|
2017-07-03 16:24:59 +02:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *device;
|
2017-07-03 16:24:59 +02:00
|
|
|
|
2017-10-10 15:00:59 +02:00
|
|
|
g_return_val_if_fail (ifname, NULL);
|
|
|
|
|
g_return_val_if_fail (device_type != NM_DEVICE_TYPE_UNKNOWN, NULL);
|
|
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if ( nm_device_get_device_type (device) == device_type
|
|
|
|
|
&& nm_streq0 (nm_device_get_iface (device), ifname))
|
|
|
|
|
return device;
|
2017-07-03 16:24:59 +02:00
|
|
|
}
|
|
|
|
|
|
2017-10-10 15:00:59 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
gboolean
|
|
|
|
|
nm_manager_remove_device (NMManager *self, const char *ifname, NMDeviceType device_type)
|
|
|
|
|
{
|
|
|
|
|
NMDevice *d;
|
|
|
|
|
|
|
|
|
|
d = nm_manager_get_device (self, ifname, device_type);
|
|
|
|
|
if (!d)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
remove_device (self, d, FALSE, FALSE);
|
|
|
|
|
return TRUE;
|
2017-07-03 16:24:59 +02:00
|
|
|
}
|
|
|
|
|
|
2012-02-10 13:25:39 -06:00
|
|
|
/**
|
|
|
|
|
* system_create_virtual_device:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @connection: the connection which might require a virtual device
|
|
|
|
|
*
|
|
|
|
|
* If @connection requires a virtual device and one does not yet exist for it,
|
|
|
|
|
* creates that device.
|
2015-12-17 18:40:36 +01:00
|
|
|
*
|
|
|
|
|
* Returns: A #NMDevice that was just realized; %NULL if none
|
2012-02-10 13:25:39 -06:00
|
|
|
*/
|
2015-12-17 18:40:36 +01:00
|
|
|
static NMDevice *
|
2015-12-11 16:13:13 +01:00
|
|
|
system_create_virtual_device (NMManager *self, NMConnection *connection)
|
2012-02-10 13:25:39 -06:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2014-09-17 14:17:30 -05:00
|
|
|
NMDeviceFactory *factory;
|
2017-02-03 14:15:16 +01:00
|
|
|
gs_free NMSettingsConnection **connections = NULL;
|
|
|
|
|
guint i;
|
2014-09-24 16:58:07 -05:00
|
|
|
gs_free char *iface = NULL;
|
2012-02-22 23:59:50 -06:00
|
|
|
NMDevice *device = NULL, *parent = NULL;
|
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *dev_candidate;
|
2015-12-11 16:13:13 +01:00
|
|
|
GError *error = NULL;
|
2017-09-14 09:26:51 +02:00
|
|
|
NMLogLevel log_level;
|
2012-02-10 13:25:39 -06:00
|
|
|
|
2015-12-18 12:09:38 +01:00
|
|
|
g_return_val_if_fail (NM_IS_MANAGER (self), NULL);
|
|
|
|
|
g_return_val_if_fail (NM_IS_CONNECTION (connection), NULL);
|
2015-01-19 13:11:29 +01:00
|
|
|
|
2016-02-16 15:07:49 +01:00
|
|
|
iface = nm_manager_get_connection_iface (self, connection, &parent, &error);
|
2015-12-11 16:13:13 +01:00
|
|
|
if (!iface) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3D (LOGD_DEVICE, connection, "can't get a name of a virtual device: %s",
|
|
|
|
|
error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free (error);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 16:13:13 +01:00
|
|
|
}
|
2011-10-18 13:48:44 +02:00
|
|
|
|
2015-12-11 15:40:34 +01:00
|
|
|
/* See if there's a device that is already compatible with this connection */
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (dev_candidate, &priv->devices_lst_head, devices_lst) {
|
2018-06-27 17:00:55 +02:00
|
|
|
if (nm_device_check_connection_compatible (dev_candidate, connection, NULL)) {
|
2018-03-23 21:51:07 +01:00
|
|
|
if (nm_device_is_real (dev_candidate)) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3D (LOGD_DEVICE, connection, "already created virtual interface name %s",
|
|
|
|
|
iface);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 15:40:34 +01:00
|
|
|
}
|
|
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
device = dev_candidate;
|
2015-12-11 15:40:34 +01:00
|
|
|
break;
|
2015-01-19 13:11:29 +01:00
|
|
|
}
|
2012-02-10 13:25:39 -06:00
|
|
|
}
|
|
|
|
|
|
2015-12-11 15:40:34 +01:00
|
|
|
if (!device) {
|
|
|
|
|
/* No matching device found. Proceed with creating a new one. */
|
|
|
|
|
|
|
|
|
|
factory = nm_device_factory_manager_find_factory_for_connection (connection);
|
|
|
|
|
if (!factory) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3E (LOGD_DEVICE, connection, "(%s) NetworkManager plugin for '%s' unavailable",
|
|
|
|
|
iface,
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_connection_get_connection_type (connection));
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 15:40:34 +01:00
|
|
|
}
|
2014-09-17 14:17:30 -05:00
|
|
|
|
2015-12-11 16:13:13 +01:00
|
|
|
device = nm_device_factory_create_device (factory, iface, NULL, connection, NULL, &error);
|
|
|
|
|
if (!device) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3W (LOGD_DEVICE, connection, "factory can't create the device: %s",
|
|
|
|
|
error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free (error);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 16:13:13 +01:00
|
|
|
}
|
2011-10-18 13:48:44 +02:00
|
|
|
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3D (LOGD_DEVICE, connection, "create virtual device %s",
|
2016-03-29 15:39:05 +02:00
|
|
|
nm_device_get_iface (device));
|
|
|
|
|
|
2015-12-11 16:13:13 +01:00
|
|
|
if (!add_device (self, device, &error)) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3W (LOGD_DEVICE, connection, "can't register the device with manager: %s",
|
|
|
|
|
error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free (error);
|
2015-12-11 15:40:34 +01:00
|
|
|
g_object_unref (device);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 15:40:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* add_device() takes a reference that NMManager still owns, so it's
|
|
|
|
|
* safe to unref here and still return @device.
|
|
|
|
|
*/
|
2015-12-08 14:51:56 +01:00
|
|
|
g_object_unref (device);
|
|
|
|
|
}
|
2011-10-18 13:48:44 +02:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
/* Create backing resources if the device has any autoconnect connections */
|
2017-11-22 12:46:58 +01:00
|
|
|
connections = nm_settings_get_connections_clone (priv->settings, NULL,
|
|
|
|
|
NULL, NULL,
|
|
|
|
|
nm_settings_connection_cmp_autoconnect_priority_p_with_data, NULL);
|
2017-02-03 14:15:16 +01:00
|
|
|
for (i = 0; connections[i]; i++) {
|
2018-08-11 11:08:17 +02:00
|
|
|
NMConnection *candidate = nm_settings_connection_get_connection (connections[i]);
|
2014-09-24 16:58:07 -05:00
|
|
|
NMSettingConnection *s_con;
|
|
|
|
|
|
2018-06-27 17:00:55 +02:00
|
|
|
if (!nm_device_check_connection_compatible (device, candidate, NULL))
|
2014-09-24 16:58:07 -05:00
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
s_con = nm_connection_get_setting_connection (candidate);
|
|
|
|
|
g_assert (s_con);
|
|
|
|
|
if (!nm_setting_connection_get_autoconnect (s_con))
|
|
|
|
|
continue;
|
2013-12-04 15:58:32 +01:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
/* Create any backing resources the device needs */
|
2015-12-11 16:13:13 +01:00
|
|
|
if (!nm_device_create_and_realize (device, connection, parent, &error)) {
|
2017-09-14 09:26:51 +02:00
|
|
|
log_level = g_error_matches (error,
|
|
|
|
|
NM_DEVICE_ERROR,
|
|
|
|
|
NM_DEVICE_ERROR_MISSING_DEPENDENCIES)
|
|
|
|
|
? LOGL_DEBUG
|
|
|
|
|
: LOGL_ERR;
|
|
|
|
|
_NMLOG3 (log_level, LOGD_DEVICE, connection,
|
|
|
|
|
"couldn't create the device: %s",
|
|
|
|
|
error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free (error);
|
2014-09-24 16:58:07 -05:00
|
|
|
remove_device (self, device, FALSE, TRUE);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2014-09-24 16:58:07 -05:00
|
|
|
}
|
2017-09-13 18:38:59 +02:00
|
|
|
|
|
|
|
|
retry_connections_for_parent_device (self, device);
|
2014-09-24 16:58:07 -05:00
|
|
|
break;
|
2011-10-18 13:48:44 +02:00
|
|
|
}
|
|
|
|
|
|
2015-12-17 18:40:36 +01:00
|
|
|
return device;
|
2011-10-18 13:48:44 +02:00
|
|
|
}
|
|
|
|
|
|
2016-01-07 17:54:38 +01:00
|
|
|
static void
|
|
|
|
|
retry_connections_for_parent_device (NMManager *self, NMDevice *device)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2017-02-03 14:15:16 +01:00
|
|
|
gs_free NMSettingsConnection **connections = NULL;
|
|
|
|
|
guint i;
|
2016-01-07 17:54:38 +01:00
|
|
|
|
|
|
|
|
g_return_if_fail (device);
|
|
|
|
|
|
2017-11-22 12:46:58 +01:00
|
|
|
connections = nm_settings_get_connections_clone (priv->settings, NULL,
|
|
|
|
|
NULL, NULL,
|
|
|
|
|
nm_settings_connection_cmp_autoconnect_priority_p_with_data, NULL);
|
2017-02-03 14:15:16 +01:00
|
|
|
for (i = 0; connections[i]; i++) {
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn = connections[i];
|
|
|
|
|
NMConnection *connection = nm_settings_connection_get_connection (sett_conn);
|
2016-03-08 12:02:54 +01:00
|
|
|
gs_free_error GError *error = NULL;
|
|
|
|
|
gs_free char *ifname = NULL;
|
2016-01-07 17:54:38 +01:00
|
|
|
NMDevice *parent;
|
|
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
parent = find_parent_device_for_connection (self, connection, NULL);
|
2016-03-08 12:02:54 +01:00
|
|
|
if (parent == device) {
|
|
|
|
|
/* Only try to activate devices that don't already exist */
|
2018-08-11 11:08:17 +02:00
|
|
|
ifname = nm_manager_get_connection_iface (self, connection, &parent, &error);
|
2016-03-08 12:02:54 +01:00
|
|
|
if (ifname) {
|
|
|
|
|
if (!nm_platform_link_get_by_ifname (NM_PLATFORM_GET, ifname))
|
2018-08-11 11:08:17 +02:00
|
|
|
connection_changed (self, sett_conn);
|
2016-03-08 12:02:54 +01:00
|
|
|
}
|
|
|
|
|
}
|
2016-01-07 17:54:38 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-18 13:48:44 +02:00
|
|
|
static void
|
2016-04-13 16:03:06 +02:00
|
|
|
connection_changed (NMManager *self,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn)
|
2008-04-29 Dan Williams <dcbw@redhat.com>
Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.
* marshallers/nm-marshal.list
- Add marshaller
* src/NetworkManager.c
- (main): let the NMManager handle the NMHalManager
* src/nm-hal-manager.c
src/nm-hal-manager.h
- convert to a GObject, and emit signals when stuff changes. Let the
NMManager handle the signals, instead of the NMHalManager calling
into the NMManager.
* src/nm-manager.c
src/nm-manager.h
- (remove_one_device): consolidate device removals here
- (dispose): use remove_one_device()
- (nm_manager_get_device_by_udi): make static
- (deferred_hal_manager_query_devices): idle handler to query the HAL
manager for devices at startup or wakeup time
- (nm_manager_new): create and monitor the HAL manager
- (hal_manager_udi_added_cb): new function; do what
nm_manager_add_device() used to do when signalled by the hal manager
- (hal_manager_udi_removed_cb): new function; do what
nm_manager_remove_device() used to do when signalled by the hal
manager
- (hal_manager_rfkill_changed_cb): handle rfkill changes from the
hal manager
- (hal_manager_hal_reappeared_cb): when HAL comes back, remove devices
in our device list that aren't known to HAL
- (impl_manager_sleep): on wakeup, re-add devices from an idle handler;
see comments on nm-hal-manager.c::nm_manager_state_changed() a few
commits ago
- (nm_manager_get_device_by_path, nm_manager_is_udi_managed,
nm_manager_activation_pending, nm_manager_wireless_enabled,
nm_manager_wireless_hardware_enabled,
nm_manager_set_wireless_hardware_enabled): remove, unused
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3619 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-29 23:03:00 +00:00
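A hedged GLib sketch of the pattern this entry describes: the HAL manager becomes a GObject that emits a signal when something changes, and the manager merely connects a handler. The class, signal and callback names below are invented for illustration and are not the historical nm-hal-manager API.

/* Illustrative GObject signal pattern; names are made up for this sketch. */
#include <glib-object.h>

#define EX_TYPE_HAL_MANAGER (ex_hal_manager_get_type ())
G_DECLARE_FINAL_TYPE (ExHalManager, ex_hal_manager, EX, HAL_MANAGER, GObject)

struct _ExHalManager {
	GObject parent;
};

G_DEFINE_TYPE (ExHalManager, ex_hal_manager, G_TYPE_OBJECT)

static guint signal_udi_added;

static void
ex_hal_manager_class_init (ExHalManagerClass *klass)
{
	/* "device appeared" style signal carrying the new device's UDI */
	signal_udi_added = g_signal_new ("udi-added",
	                                 G_OBJECT_CLASS_TYPE (klass),
	                                 G_SIGNAL_RUN_FIRST,
	                                 0, NULL, NULL, NULL,
	                                 G_TYPE_NONE, 1, G_TYPE_STRING);
}

static void
ex_hal_manager_init (ExHalManager *self)
{
}

static void
manager_udi_added_cb (ExHalManager *hal_mgr, const char *udi, gpointer user_data)
{
	/* in the real code the manager would create and register a device here */
	g_print ("manager saw new device: %s\n", udi);
}

int
main (void)
{
	ExHalManager *hal_mgr = g_object_new (EX_TYPE_HAL_MANAGER, NULL);

	g_signal_connect (hal_mgr, "udi-added", G_CALLBACK (manager_udi_added_cb), NULL);
	g_signal_emit (hal_mgr, signal_udi_added, 0, "/org/freedesktop/Hal/devices/net_example");
	g_object_unref (hal_mgr);
	return 0;
}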
|
|
|
{
|
2015-12-17 18:40:36 +01:00
|
|
|
NMDevice *device;
|
2018-08-11 11:08:17 +02:00
|
|
|
NMConnection *connection = nm_settings_connection_get_connection (sett_conn);
|
2015-12-17 18:40:36 +01:00
|
|
|
|
|
|
|
|
if (!nm_connection_is_virtual (connection))
|
|
|
|
|
return;
|
|
|
|
|
|
2016-04-13 16:03:06 +02:00
|
|
|
device = system_create_virtual_device (self, connection);
|
2015-12-17 18:40:36 +01:00
|
|
|
if (!device)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/* Maybe the device that was created was needed by some other
|
|
|
|
|
* connection's device (parent of a VLAN). Let the connections that
|
|
|
|
|
* can use the newly created device as a parent know. */
|
2016-04-13 16:03:06 +02:00
|
|
|
retry_connections_for_parent_device (self, device);
|
2011-10-18 13:48:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2016-04-13 16:03:06 +02:00
|
|
|
connection_added_cb (NMSettings *settings,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2016-04-13 16:03:06 +02:00
|
|
|
NMManager *self)
|
2011-10-18 13:48:44 +02:00
|
|
|
{
|
2018-08-11 11:08:17 +02:00
|
|
|
connection_changed (self, sett_conn);
|
2016-04-13 16:03:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
connection_updated_cb (NMSettings *settings,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2016-04-13 16:03:06 +02:00
|
|
|
gboolean by_user,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
if (by_user)
|
2018-08-11 11:08:17 +02:00
|
|
|
connection_changed (self, sett_conn);
|
2008-05-22 14:22:31 +00:00
|
|
|
}
|
|
|
|
|
|
2017-12-05 13:55:25 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
|
CList delete_volatile_connection_lst;
|
|
|
|
|
NMSettingsConnection *connection;
|
|
|
|
|
} DeleteVolatileConnectionData;
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_delete_volatile_connection_all (NMManager *self, gboolean do_delete)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
CList *lst;
|
|
|
|
|
DeleteVolatileConnectionData *data;
|
|
|
|
|
|
|
|
|
|
while ((lst = c_list_first (&priv->delete_volatile_connection_lst_head))) {
|
|
|
|
|
gs_unref_object NMSettingsConnection *connection = NULL;
|
|
|
|
|
|
|
|
|
|
data = c_list_entry (lst,
|
|
|
|
|
DeleteVolatileConnectionData,
|
|
|
|
|
delete_volatile_connection_lst);
|
|
|
|
|
connection = data->connection;
|
|
|
|
|
c_list_unlink_stale (&data->delete_volatile_connection_lst);
|
|
|
|
|
g_slice_free (DeleteVolatileConnectionData, data);
|
|
|
|
|
|
|
|
|
|
if (do_delete)
|
|
|
|
|
_delete_volatile_connection_do (self, connection);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_delete_volatile_connection_cb (gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = user_data;
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
|
|
|
|
priv->delete_volatile_connection_idle_id = 0;
|
|
|
|
|
_delete_volatile_connection_all (self, TRUE);
|
|
|
|
|
return G_SOURCE_REMOVE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
connection_flags_changed (NMSettings *settings,
|
|
|
|
|
NMSettingsConnection *connection,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = user_data;
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
DeleteVolatileConnectionData *data;
|
|
|
|
|
|
|
|
|
|
if (!NM_FLAGS_HAS (nm_settings_connection_get_flags (connection),
|
2018-04-05 20:02:45 +02:00
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE))
|
2017-12-05 13:55:25 +01:00
|
|
|
return;
|
|
|
|
|
|
2018-04-19 15:27:54 +02:00
|
|
|
if (active_connection_find (self, connection, NULL, NM_ACTIVE_CONNECTION_STATE_DEACTIVATED, NULL)) {
|
2018-04-19 15:42:27 +02:00
|
|
|
/* the connection still has an active-connection. It will be purged
|
2017-12-05 13:55:25 +01:00
|
|
|
* when the active connection(s) get(s) removed. */
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
data = g_slice_new (DeleteVolatileConnectionData);
|
|
|
|
|
data->connection = g_object_ref (connection);
|
|
|
|
|
c_list_link_tail (&priv->delete_volatile_connection_lst_head, &data->delete_volatile_connection_lst);
|
|
|
|
|
if (!priv->delete_volatile_connection_idle_id)
|
|
|
|
|
priv->delete_volatile_connection_idle_id = g_idle_add (_delete_volatile_connection_cb, self);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2008-04-07 Dan Williams <dcbw@redhat.com>
* include/NetworkManager.h
- Remove the DOWN and CANCELLED device states
- Add UNMANAGED and UNAVAILABLE device states
- Document the device states
* introspection/nm-device.xml
src/nm-device-interface.c
src/nm-device-interface.h
- Add the 'managed' property
* test/nm-tool.c
- (detail_device): print out device state
* src/NetworkManagerSystem.h
src/backends/NetworkManagerArch.c
src/backends/NetworkManagerDebian.c
src/backends/NetworkManagerFrugalware.c
src/backends/NetworkManagerGentoo.c
src/backends/NetworkManagerMandriva.c
src/backends/NetworkManagerPaldo.c
src/backends/NetworkManagerRedHat.c
src/backends/NetworkManagerSlackware.c
src/backends/NetworkManagerSuSE.c
- (nm_system_device_get_system_config, nm_system_device_get_disabled
nm_system_device_free_system_config): remove; they were unused and
their functionality should be re-implemented in each distro's
system settings service plugin
* src/nm-gsm-device.c
src/nm-gsm-device.h
src/nm-cdma-device.c
src/nm-cdma-device.h
- (*_new): take the 'managed' argument
* src/nm-device.c
- (nm_device_set_address): remove, fold into nm_device_bring_up()
- (nm_device_init): start in unmanaged state, not disconnected
- (constructor): don't start device until the system settings service
has had a chance to figure out if the device is managed or not
- (nm_device_deactivate, nm_device_bring_up, nm_device_bring_down):
don't set device state here, let callers handle that as appropriate
- (nm_device_dispose): don't touch the device if it's not managed
- (set_property, get_property, nm_device_class_init): implement the
'managed' property
- (nm_device_state_changed): bring the device up if it's now managed,
and deactivate it if it used to be active
- (nm_device_get_managed, nm_device_set_managed): do the right thing
with the managed state
* src/nm-hal-manager.c
- (wired_device_creator, wireless_device_creator, modem_device_creator):
take initial managed state and pass it along to device constructors
- (create_device_and_add_to_list): get managed state and pass to
type creators
* src/nm-device-802-11-wireless.c
- (real_can_activate): fold in most of
nm_device_802_11_wireless_can_activate()
- (can_scan): can't scan in UNAVAILABLE or UNMANAGED
- (link_timeout_cb): instead of deactivating, change device state and
let the device state handler do it
- (real_update_hw_address): clean up
- (state_changed_cb): when entering UNAVAILABLE state, schedule an idle
handler to transition to DISCONNECTED if the device isn't rfkilled
* src/nm-device-802-3-ethernet.c
- (set_carrier): move above callers and get rid of prototype
- (device_state_changed): when entering UNAVAILABLE state, schedule an
idle handler to transition to DISCONNECTED if the device has a
carrier
- (real_update_hw_address): clean up
- (link_timeout_cb, ppp_state_changed): change state instead of calling
deactivation directly as deactivation doesn't change state anymore
* src/NetworkManagerPolicy.c
- (schedule_activate_check): yay, remove wireless_enabled hack since
the NMManager and wireless devices work that out themselves now
- (device_state_changed): change to a switch and update for new device
states
- (device_carrier_changed): remove; device handles this now through
state changes
- (device_added): don't care about carrier any more; the initial
activation check will happen when the device transitions to
DISCONNECTED
* src/nm-manager.c
- (dispose): clear unmanaged devices
- (handle_unmanaged_devices): update unmanaged device list and toggle
the managed property on each device when needed
- (system_settings_properties_changed_cb): handle signals from the
system settings service
- (system_settings_get_unmanaged_devices_cb): handle callback from
getting the unmanaged device list method call
- (query_unmanaged_devices): ask the system settings service for its
list of unmanaged devices
- (nm_manager_name_owner_changed, initial_get_connections): get unmanaged
devices
- (manager_set_wireless_enabled): push rfkill state down to wireless
devices directly and let them handle the necessary state transitions
- (manager_device_state_changed): update for new device states
- (nm_manager_add_device): set initial rfkill state on wireless devices
- (nm_manager_remove_device): don't touch the device if it's unmanaged
- (nm_manager_activate_connection): return error if the device is
unmanaged
- (nm_manager_sleep): handle new device states correctly; don't change
the state of unavailable/unmanaged devices
* libnm-glib/nm-device-802-11-wireless.c
- (state_changed_cb): update for new device states
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3540 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-08 02:58:02 +00:00
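The managed/unavailable handling summarized above can be pictured with a small standalone sketch (illustrative names only, not the NMDevice API): state changes are ignored for unmanaged devices, and entering UNAVAILABLE schedules an idle handler that moves on to DISCONNECTED once the carrier is present.

/* Illustrative only: simplified device states, not the NetworkManager API. */
#include <glib.h>

typedef enum {
	DEV_STATE_UNMANAGED,
	DEV_STATE_UNAVAILABLE,
	DEV_STATE_DISCONNECTED,
} DevState;

typedef struct {
	const char *iface;
	DevState state;
	gboolean managed;
	gboolean has_carrier;
	GMainLoop *loop;
} Dev;

static gboolean
unavailable_to_disconnected_cb (gpointer user_data)
{
	Dev *dev = user_data;

	/* only transition once the blocking condition went away */
	if (dev->state == DEV_STATE_UNAVAILABLE && dev->has_carrier) {
		dev->state = DEV_STATE_DISCONNECTED;
		g_print ("%s: unavailable -> disconnected\n", dev->iface);
	}
	g_main_loop_quit (dev->loop);
	return G_SOURCE_REMOVE;
}

static void
dev_set_state (Dev *dev, DevState state)
{
	if (!dev->managed)
		return;   /* unmanaged devices are left alone */
	dev->state = state;
	if (state == DEV_STATE_UNAVAILABLE)
		g_idle_add (unavailable_to_disconnected_cb, dev);
}

int
main (void)
{
	Dev dev = { .iface = "eth0", .state = DEV_STATE_UNMANAGED,
	            .managed = TRUE, .has_carrier = TRUE };

	dev.loop = g_main_loop_new (NULL, FALSE);
	dev_set_state (&dev, DEV_STATE_UNAVAILABLE);
	g_main_loop_run (dev.loop);
	g_main_loop_unref (dev.loop);
	return 0;
}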
|
|
|
static void
|
2010-10-27 20:05:23 -05:00
|
|
|
system_unmanaged_devices_changed_cb (NMSettings *settings,
|
2009-06-11 00:39:12 -04:00
|
|
|
GParamSpec *pspec,
|
|
|
|
|
gpointer user_data)
|
2008-04-08 02:58:02 +00:00
|
|
|
{
|
2010-10-27 20:05:23 -05:00
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
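A minimal sketch of the embedded-CList idea from this commit message, assuming the c-list header from the c-util project is available (the Device struct and field names are illustrative, not NMDevice): the link node lives inside the tracked object, so linking, unlinking and "is it linked?" checks are O(1) and need no extra allocation.

/* Illustrative only; assumes the c-list single-header library (c-util project). */
#include <stdio.h>
#include "c-list.h"   /* adjust the include path to wherever c-list.h lives */

typedef struct {
	CList devices_lst;     /* embedded link node, analogous to NMDevice's devices_lst */
	const char *iface;
} Device;

int
main (void)
{
	CList devices_lst_head = C_LIST_INIT (devices_lst_head);
	Device d1 = { .iface = "eth0" };
	Device d2 = { .iface = "wlan0" };
	Device *d;

	c_list_init (&d1.devices_lst);
	c_list_init (&d2.devices_lst);

	c_list_link_tail (&devices_lst_head, &d1.devices_lst);
	c_list_link_tail (&devices_lst_head, &d2.devices_lst);

	c_list_for_each_entry (d, &devices_lst_head, devices_lst)
		printf ("tracked: %s\n", d->iface);

	/* O(1) unlink without access to the list head; afterwards the node reports unlinked */
	c_list_unlink (&d1.devices_lst);
	printf ("d1 still linked: %d\n", c_list_is_linked (&d1.devices_lst));
	return 0;
}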
|
|
|
NMDevice *device;
|
2008-04-08 02:58:02 +00:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst)
|
|
|
|
|
nm_device_set_unmanaged_by_user_settings (device);
|
2008-09-18 Dan Williams <dcbw@redhat.com>
Implement support for honoring configured and automatic hostnames, and for
setting the configured hostname.
* introspection/nm-ip4-config.xml
src/nm-ip4-config.c
src/nm-ip4-config.h
src/dhcp-manager/nm-dhcp-manager.c
- Remove useless hostname property; it's not really part of the IPv4
config
* introspection/nm-settings-system.xml
libnm-glib/nm-dbus-settings-system.c
libnm-glib/nm-dbus-settings-system.h
- Add SetHostname() call to system settings D-Bus interface
- Add Hostname property to system settings D-Bus interface
- (nm_dbus_settings_system_save_hostname,
nm_dbus_settings_system_get_hostname): implement
* src/nm-device.c
src/nm-device.h
- (nm_device_get_dhcp4_config): implement
* src/nm-manager.c
src/nm-manager.h
- Fetch and track system settings service hostname changes, and proxy
the changes via a GObject property of the manager
* system-settings/src/nm-system-config-interface.c
system-settings/src/nm-system-config-interface.h
- Replace nm_system_config_interface_supports_add() with a capabilities
bitfield
* system-settings/src/nm-system-config-error.c
system-settings/src/nm-system-config-error.h
- Add additional errors
* system-settings/src/dbus-settings.c
system-settings/src/dbus-settings.h
- (get_property, nm_sysconfig_settings_class_init): add hostname
property; first plugin returning a hostname wins
- (impl_settings_add_connection): use plugin capabilities instead of
nm_system_config_interface_supports_add()
- (impl_settings_save_hostname): implement hostname saving
* src/NetworkManagerPolicy.c
- (lookup_thread_run_cb, lookup_thread_worker, lookup_thread_new,
lookup_thread_die): implement an asynchronous hostname lookup thread
which given an IPv4 address tries to look up the hostname for that
address with reverse DNS
- (get_best_device): split out best device code from
update_routing_and_dns()
- (update_etc_hosts): update /etc/hosts with the machine's new hostname
to preserve the 127.0.0.1 reverse mapping that so many things require
- (set_system_hostname): set a given hostname
- (update_system_hostname): implement hostname policy; a configured
hostname (from the system settings service) is used if available,
otherwise an automatically determined hostname from DHCP, VPN, etc.
If there was no automatically determined hostname, reverse DNS of
the best device's IP address will be used, and as a last resort the
hostname 'localhost.localdomain' is set.
- (update_routing_and_dns): use get_best_device(); update the system
hostname when the network config changes
- (hostname_changed): update system hostname if the system settings
service signals a hostname change
- (nm_policy_new): listen for system settings service hostname changes
- (nm_policy_destroy): ensure that an in-progress hostname lookup thread
gets told to die
* system-settings/plugins/keyfile/plugin.c
system-settings/plugins/ifcfg-suse/plugin.c
- (get_property, sc_plugin_ifcfg_class_init): implement hostname and
capabilities properties
* system-settings/plugins/ifcfg-fedora/shvar.c
- (svOpenFile): re-enable R/W access of ifcfg files since the plugin
writes out /etc/sysconfig/network now
* system-settings/plugins/ifcfg-fedora/plugin.c
- (plugin_get_hostname): get hostname from /etc/sysconfig/network
- (plugin_set_hostname): save hostname to /etc/sysconfig/network
- (sc_network_changed_cb): handle changes to /etc/sysconfig/network
- (sc_plugin_ifcfg_init): monitor /etc/sysconfig/network for changes
- (get_property, set_property, sc_plugin_ifcfg_class_init): implement
hostname get/set and capabilities get
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@4077 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-09-18 15:16:44 +00:00
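The hostname policy described in this entry boils down to a priority chain: a configured hostname wins, then an automatically obtained one (DHCP, VPN, ...), then reverse DNS of the best device's address, and finally a fixed fallback. A standalone sketch with invented inputs, not the real policy code:

/* Illustrative priority chain for picking the system hostname; the inputs are
 * hard-coded stand-ins, not values from the settings service, DHCP or DNS. */
#include <stdio.h>

static const char *
choose_hostname (const char *configured, const char *automatic, const char *reverse_dns)
{
	if (configured && *configured)
		return configured;          /* from the system settings service */
	if (automatic && *automatic)
		return automatic;           /* from DHCP, VPN, ... */
	if (reverse_dns && *reverse_dns)
		return reverse_dns;         /* reverse lookup of the best device's address */
	return "localhost.localdomain"; /* last resort */
}

int
main (void)
{
	printf ("%s\n", choose_hostname (NULL, "dhcp-host-42", NULL));
	printf ("%s\n", choose_hostname (NULL, NULL, NULL));
	return 0;
}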
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2017-04-23 14:20:37 +02:00
|
|
|
hostname_changed_cb (NMHostnameManager *hostname_manager,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self)
|
2008-09-18 15:16:44 +00:00
|
|
|
{
|
2010-10-27 20:05:23 -05:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2017-04-23 14:20:37 +02:00
|
|
|
const char *hostname;
|
2008-04-08 02:58:02 +00:00
|
|
|
|
2017-04-23 14:20:37 +02:00
|
|
|
hostname = nm_hostname_manager_get_hostname (priv->hostname_manager);
|
2008-09-18 15:16:44 +00:00
|
|
|
|
2017-04-23 14:20:37 +02:00
|
|
|
nm_dispatcher_call_hostname (NULL, NULL, NULL);
|
|
|
|
|
nm_dhcp_manager_set_default_hostname (nm_dhcp_manager_get (), hostname);
|
2008-04-08 02:58:02 +00:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2009-06-11 00:39:12 -04:00
|
|
|
/* General NMManager stuff */
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
static gboolean
|
2011-04-22 14:56:31 -05:00
|
|
|
radio_enabled_for_rstate (RadioState *rstate, gboolean check_changeable)
|
2010-09-01 17:08:10 -05:00
|
|
|
{
|
2011-04-13 21:58:25 -05:00
|
|
|
gboolean enabled;
|
|
|
|
|
|
2011-04-22 14:56:31 -05:00
|
|
|
enabled = rstate->user_enabled && rstate->hw_enabled;
|
2013-05-31 14:28:16 -05:00
|
|
|
if (check_changeable)
|
2011-04-22 14:56:31 -05:00
|
|
|
enabled &= rstate->sw_enabled;
|
2011-04-13 21:58:25 -05:00
|
|
|
return enabled;
|
2010-09-01 17:08:10 -05:00
|
|
|
}
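
To make the flag combination above concrete, here is a small illustrative check (not part of the original file; the literal values are invented for the example). A radio counts as enabled when the user wants it on and the hardware killswitch allows it; the kernel soft-block only enters the result when the caller asks whether the state is currently changeable.

/* Illustration only -- not part of nm-manager.c; shows how the three
 * RadioState flags combine in radio_enabled_for_rstate(). */
static void
radio_enabled_for_rstate_example (void)
{
	RadioState rs = {
		.user_enabled = TRUE,
		.hw_enabled   = TRUE,
		.sw_enabled   = FALSE,   /* soft-blocked by the kernel rfkill switch */
	};

	/* without the changeable check, user + hw state alone decide */
	g_assert (radio_enabled_for_rstate (&rs, FALSE) == TRUE);

	/* with the changeable check, the soft block is also taken into account */
	g_assert (radio_enabled_for_rstate (&rs, TRUE) == FALSE);
}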
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2011-04-22 14:56:31 -05:00
|
|
|
radio_enabled_for_type (NMManager *self, RfKillType rtype, gboolean check_changeable)
|
2010-09-01 17:08:10 -05:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
2011-04-22 14:56:31 -05:00
|
|
|
return radio_enabled_for_rstate (&priv->radio_states[rtype], check_changeable);
|
2010-09-01 17:08:10 -05:00
|
|
|
}
|
|
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
static void
|
2011-04-13 21:58:25 -05:00
|
|
|
manager_update_radio_enabled (NMManager *self,
|
|
|
|
|
RadioState *rstate,
|
|
|
|
|
gboolean enabled)
|
2008-02-06 16:50:43 +00:00
|
|
|
{
|
2010-09-01 17:08:10 -05:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *device;
|
2008-02-06 16:50:43 +00:00
|
|
|
|
2009-12-23 00:03:45 -08:00
|
|
|
/* Do nothing for radio types not yet implemented */
|
|
|
|
|
if (!rstate->prop)
|
|
|
|
|
return;
|
|
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
g_object_notify (G_OBJECT (self), rstate->prop);
|
2009-11-02 17:29:53 -08:00
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
/* Don't touch devices if asleep/networking disabled */
|
2010-09-01 17:08:10 -05:00
|
|
|
if (manager_sleeping (self))
|
2009-06-11 00:39:12 -04:00
|
|
|
return;
|
2007-09-03 01:12:23 +00:00
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
/* enable/disable wireless devices as required */
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2011-11-18 12:02:58 -06:00
|
|
|
if (nm_device_get_rfkill_type (device) == rstate->rtype) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOG2D (LOGD_RFKILL, device, "rfkill: setting radio %s", enabled ? "enabled" : "disabled");
|
2011-11-18 12:02:58 -06:00
|
|
|
nm_device_set_enabled (device, enabled);
|
2010-04-07 14:55:43 -07:00
|
|
|
}
|
2008-02-06 16:50:43 +00:00
|
|
|
}
|
2007-09-03 01:12:23 +00:00
|
|
|
}
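
The embedded-CList tracking described in the blame annotation above reduces to the pattern sketched here. This is a self-contained illustration with made-up names (Tracker/Item), assuming the c-list helpers are available through the tree's nm-utils/nm-c-list.h wrapper; in the real code the link node is NMDevice's devices_lst member and the head is NMManagerPrivate's devices_lst_head.

/* Minimal sketch of the embedded-CList pattern (illustrative names only). */
#include <stdio.h>
#include "nm-utils/nm-c-list.h"   /* pulls in the CList API used by the daemon */

typedef struct {
	CList items_head;             /* list head, embedded in the tracker */
} Tracker;

typedef struct {
	const char *name;
	CList       node;             /* link node embedded in the item itself */
} Item;

static void
tracker_init (Tracker *t)
{
	c_list_init (&t->items_head);
}

static void
tracker_add (Tracker *t, Item *item)
{
	/* O(1), no separate allocation for the link */
	c_list_link_tail (&t->items_head, &item->node);
}

static void
item_untrack (Item *item)
{
	/* O(1) and does not need access to the list head */
	c_list_unlink (&item->node);
}

static void
tracker_dump (Tracker *t)
{
	Item *item;

	c_list_for_each_entry (item, &t->items_head, node)
		printf ("%s\n", item->name);
}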
|
|
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
static void
|
2014-04-15 16:25:39 -05:00
|
|
|
update_rstate_from_rfkill (NMRfkillManager *rfkill_mgr, RadioState *rstate)
|
2010-09-01 17:08:10 -05:00
|
|
|
{
|
2014-04-15 16:25:39 -05:00
|
|
|
switch (nm_rfkill_manager_get_rfkill_state (rfkill_mgr, rstate->rtype)) {
|
|
|
|
|
case RFKILL_UNBLOCKED:
|
2010-09-01 17:08:10 -05:00
|
|
|
rstate->sw_enabled = TRUE;
|
|
|
|
|
rstate->hw_enabled = TRUE;
|
2014-04-15 16:25:39 -05:00
|
|
|
break;
|
|
|
|
|
case RFKILL_SOFT_BLOCKED:
|
2010-09-01 17:08:10 -05:00
|
|
|
rstate->sw_enabled = FALSE;
|
|
|
|
|
rstate->hw_enabled = TRUE;
|
2014-04-15 16:25:39 -05:00
|
|
|
break;
|
|
|
|
|
case RFKILL_HARD_BLOCKED:
|
2010-09-01 17:08:10 -05:00
|
|
|
rstate->sw_enabled = FALSE;
|
|
|
|
|
rstate->hw_enabled = FALSE;
|
2014-04-15 16:25:39 -05:00
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
g_warn_if_reached ();
|
|
|
|
|
break;
|
2010-09-01 17:08:10 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-11-24 10:43:43 -08:00
|
|
|
static void
|
2009-12-23 00:03:45 -08:00
|
|
|
manager_rfkill_update_one_type (NMManager *self,
|
|
|
|
|
RadioState *rstate,
|
|
|
|
|
RfKillType rtype)
|
2009-11-24 10:43:43 -08:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2013-05-31 14:28:16 -05:00
|
|
|
gboolean old_enabled, new_enabled, old_rfkilled, new_rfkilled, old_hwe;
|
2010-09-01 17:08:10 -05:00
|
|
|
|
2011-04-13 21:58:25 -05:00
|
|
|
old_enabled = radio_enabled_for_rstate (rstate, TRUE);
|
2010-09-01 17:08:10 -05:00
|
|
|
old_rfkilled = rstate->hw_enabled && rstate->sw_enabled;
|
|
|
|
|
old_hwe = rstate->hw_enabled;
|
2009-11-24 10:43:43 -08:00
|
|
|
|
2014-04-15 16:25:39 -05:00
|
|
|
/* recheck kernel rfkill state */
|
|
|
|
|
update_rstate_from_rfkill (priv->rfkill_mgr, rstate);
|
2009-11-24 10:43:43 -08:00
|
|
|
|
2011-04-13 21:58:25 -05:00
|
|
|
/* Print out all states affecting device enablement */
|
2010-04-08 18:23:43 -07:00
|
|
|
if (rstate->desc) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOGD (LOGD_RFKILL, "rfkill: %s hw-enabled %d sw-enabled %d",
|
2016-03-02 11:38:26 +01:00
|
|
|
rstate->desc, rstate->hw_enabled, rstate->sw_enabled);
|
2010-04-08 18:23:43 -07:00
|
|
|
}
|
|
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
/* Log new killswitch state */
|
|
|
|
|
new_rfkilled = rstate->hw_enabled && rstate->sw_enabled;
|
|
|
|
|
if (old_rfkilled != new_rfkilled) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOGI (LOGD_RFKILL, "rfkill: %s now %s by radio killswitch",
|
2016-03-02 11:38:26 +01:00
|
|
|
rstate->desc,
|
|
|
|
|
new_rfkilled ? "enabled" : "disabled");
|
2010-09-01 17:08:10 -05:00
|
|
|
}
|
2009-12-23 00:03:45 -08:00
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
/* Send out property changed signal for HW enabled */
|
|
|
|
|
if (rstate->hw_enabled != old_hwe) {
|
2010-04-08 18:23:43 -07:00
|
|
|
if (rstate->hw_prop)
|
|
|
|
|
g_object_notify (G_OBJECT (self), rstate->hw_prop);
|
2009-11-24 10:43:43 -08:00
|
|
|
}
|
2010-09-01 17:08:10 -05:00
|
|
|
|
2011-04-13 21:58:25 -05:00
|
|
|
/* And finally update the actual device radio state itself; respect the
|
|
|
|
|
* daemon state here because this is never called from user-triggered
|
|
|
|
|
* radio changes and we only want to ignore the daemon enabled state when
|
|
|
|
|
* handling user radio change requests.
|
|
|
|
|
*/
|
|
|
|
|
new_enabled = radio_enabled_for_rstate (rstate, TRUE);
|
2010-09-01 17:08:10 -05:00
|
|
|
if (new_enabled != old_enabled)
|
2011-04-13 21:58:25 -05:00
|
|
|
manager_update_radio_enabled (self, rstate, new_enabled);
|
2009-12-23 00:03:45 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
nm_manager_rfkill_update (NMManager *self, RfKillType rtype)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
guint i;
|
|
|
|
|
|
2011-04-13 21:58:25 -05:00
|
|
|
if (rtype != RFKILL_TYPE_UNKNOWN)
|
2009-12-23 00:03:45 -08:00
|
|
|
manager_rfkill_update_one_type (self, &priv->radio_states[rtype], rtype);
|
2011-04-13 21:58:25 -05:00
|
|
|
else {
|
|
|
|
|
/* Otherwise sync all radio types */
|
|
|
|
|
for (i = 0; i < RFKILL_TYPE_MAX; i++)
|
|
|
|
|
manager_rfkill_update_one_type (self, &priv->radio_states[i], i);
|
2009-11-24 10:43:43 -08:00
|
|
|
}
|
|
|
|
|
}
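
As a usage note, nm_manager_rfkill_update() is meant to be driven from NMRfkillManager change notifications. The handler below is a hypothetical sketch (its name, signature and the signal wiring are assumptions, not the file's actual callback); it only shows that a change for one radio type resyncs that type, while anything unrecognized falls back to RFKILL_TYPE_UNKNOWN and resyncs all types.

/* Hypothetical handler, for illustration only -- not the file's real
 * rfkill callback. A specific RfKillType resyncs one radio; anything
 * out of range falls back to RFKILL_TYPE_UNKNOWN, which resyncs all. */
static void
rfkill_changed_example_cb (NMRfkillManager *rfkill_mgr,
                           guint rtype_uint,
                           gpointer user_data)
{
	NMManager *self = NM_MANAGER (user_data);
	RfKillType rtype = (RfKillType) rtype_uint;

	if (rtype >= RFKILL_TYPE_MAX)
		rtype = RFKILL_TYPE_UNKNOWN;

	nm_manager_rfkill_update (self, rtype);
}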
|
|
|
|
|
|
2012-06-01 15:27:39 -05:00
|
|
|
static void
|
|
|
|
|
device_auth_done_cb (NMAuthChain *chain,
|
|
|
|
|
GError *auth_error,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2012-06-01 15:27:39 -05:00
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
GError *error = NULL;
|
|
|
|
|
NMAuthCallResult result;
|
|
|
|
|
NMDevice *device;
|
|
|
|
|
const char *permission;
|
|
|
|
|
NMDeviceAuthRequestFunc callback;
|
2015-07-14 10:19:19 +02:00
|
|
|
NMAuthSubject *subject;
|
2012-06-01 15:27:39 -05:00
|
|
|
|
2013-07-29 11:53:23 -05:00
|
|
|
g_assert (context);
|
|
|
|
|
|
2012-06-01 15:27:39 -05:00
|
|
|
priv->auth_chains = g_slist_remove (priv->auth_chains, chain);
|
|
|
|
|
|
|
|
|
|
permission = nm_auth_chain_get_data (chain, "requested-permission");
|
|
|
|
|
g_assert (permission);
|
|
|
|
|
callback = nm_auth_chain_get_data (chain, "callback");
|
|
|
|
|
g_assert (callback);
|
|
|
|
|
device = nm_auth_chain_get_data (chain, "device");
|
|
|
|
|
g_assert (device);
|
|
|
|
|
|
2012-10-08 12:52:15 -05:00
|
|
|
result = nm_auth_chain_get_result (chain, permission);
|
2015-07-14 10:19:19 +02:00
|
|
|
subject = nm_auth_chain_get_subject (chain);
|
2012-10-08 12:52:15 -05:00
|
|
|
|
2012-06-01 15:27:39 -05:00
|
|
|
if (auth_error) {
|
|
|
|
|
/* translate the auth error into a manager permission denied error */
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "%s request failed: %s", permission, auth_error->message);
|
2012-06-01 15:27:39 -05:00
|
|
|
error = g_error_new (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"%s request failed: %s",
|
|
|
|
|
permission, auth_error->message);
|
|
|
|
|
} else if (result != NM_AUTH_CALL_RESULT_YES) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "%s request failed: not authorized", permission);
|
2012-06-01 15:27:39 -05:00
|
|
|
error = g_error_new (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"%s request failed: not authorized",
|
|
|
|
|
permission);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
g_assert (error || (result == NM_AUTH_CALL_RESULT_YES));
|
|
|
|
|
|
|
|
|
|
callback (device,
|
|
|
|
|
context,
|
2015-07-14 10:19:19 +02:00
|
|
|
subject,
|
2012-06-01 15:27:39 -05:00
|
|
|
error,
|
|
|
|
|
nm_auth_chain_get_data (chain, "user-data"));
|
|
|
|
|
|
|
|
|
|
g_clear_error (&error);
|
2018-04-09 11:52:55 +02:00
|
|
|
nm_auth_chain_destroy (chain);
|
2012-06-01 15:27:39 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
device_auth_request_cb (NMDevice *device,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2014-01-17 11:18:23 -06:00
|
|
|
NMConnection *connection,
|
2012-06-01 15:27:39 -05:00
|
|
|
const char *permission,
|
|
|
|
|
gboolean allow_interaction,
|
|
|
|
|
NMDeviceAuthRequestFunc callback,
|
|
|
|
|
gpointer user_data,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
GError *error = NULL;
|
2014-01-17 11:18:23 -06:00
|
|
|
NMAuthSubject *subject = NULL;
|
2012-06-01 15:27:39 -05:00
|
|
|
NMAuthChain *chain;
|
|
|
|
|
|
2014-01-17 11:18:23 -06:00
|
|
|
/* Validate the caller */
|
2014-08-14 13:34:57 +02:00
|
|
|
subject = nm_auth_subject_new_unix_process_from_context (context);
|
2014-01-17 11:18:23 -06:00
|
|
|
if (!subject) {
|
|
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Failed to get request UID.");
|
|
|
|
|
goto done;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Ensure the subject has permissions for this connection */
|
2018-04-12 09:48:16 +02:00
|
|
|
if ( connection
|
|
|
|
|
&& !nm_auth_is_subject_in_acl_set_error (connection,
|
|
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
&error))
|
2014-01-17 11:18:23 -06:00
|
|
|
goto done;
|
|
|
|
|
|
2012-12-16 12:30:41 -06:00
|
|
|
/* Validate the request */
|
2014-01-17 11:18:23 -06:00
|
|
|
chain = nm_auth_chain_new_subject (subject, context, device_auth_done_cb, self);
|
2013-07-29 11:53:23 -05:00
|
|
|
if (!chain) {
|
2012-06-01 15:27:39 -05:00
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2013-07-29 11:53:23 -05:00
|
|
|
"Unable to authenticate request.");
|
2014-01-17 11:18:23 -06:00
|
|
|
goto done;
|
2012-06-01 15:27:39 -05:00
|
|
|
}
|
2013-07-29 11:53:23 -05:00
|
|
|
|
|
|
|
|
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
|
|
|
|
nm_auth_chain_set_data (chain, "device", g_object_ref (device), g_object_unref);
|
|
|
|
|
nm_auth_chain_set_data (chain, "requested-permission", g_strdup (permission), g_free);
|
|
|
|
|
nm_auth_chain_set_data (chain, "callback", callback, NULL);
|
|
|
|
|
nm_auth_chain_set_data (chain, "user-data", user_data, NULL);
|
|
|
|
|
nm_auth_chain_add_call (chain, permission, allow_interaction);
|
2014-01-17 11:18:23 -06:00
|
|
|
|
|
|
|
|
done:
|
|
|
|
|
if (error)
|
2015-07-14 10:19:19 +02:00
|
|
|
callback (device, context, subject, error, user_data);
|
|
|
|
|
|
|
|
|
|
g_clear_object (&subject);
|
2014-01-17 11:18:23 -06:00
|
|
|
g_clear_error (&error);
|
2012-06-01 15:27:39 -05:00
|
|
|
}
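
The two callbacks above follow the general NMAuthChain pattern: build a chain bound to the requesting subject, stash whatever the completion handler will need, queue the permission check, and read the verdict back in the done callback. Below is a stripped-down hypothetical example of that pattern; the example_* names are invented, and NM_AUTH_PERMISSION_NETWORK_CONTROL is used merely as a plausible permission string.

/* Hypothetical example of the NMAuthChain pattern above; example_* names
 * are invented for illustration. Error handling is simplified and the
 * chain is not tracked in priv->auth_chains as the real code does. */
static void
example_auth_done_cb (NMAuthChain *chain,
                      GError *auth_error,
                      GDBusMethodInvocation *context,
                      gpointer user_data)
{
	NMAuthCallResult result;

	result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_NETWORK_CONTROL);
	if (auth_error || result != NM_AUTH_CALL_RESULT_YES) {
		g_dbus_method_invocation_return_error (context,
		                                       NM_MANAGER_ERROR,
		                                       NM_MANAGER_ERROR_PERMISSION_DENIED,
		                                       "not authorized");
	} else
		g_dbus_method_invocation_return_value (context, NULL);

	nm_auth_chain_destroy (chain);
}

static void
example_check_network_control (NMManager *self,
                               GDBusMethodInvocation *context,
                               NMAuthSubject *subject)
{
	NMAuthChain *chain;

	chain = nm_auth_chain_new_subject (subject, context, example_auth_done_cb, self);
	if (!chain) {
		g_dbus_method_invocation_return_error (context,
		                                       NM_MANAGER_ERROR,
		                                       NM_MANAGER_ERROR_PERMISSION_DENIED,
		                                       "Unable to authenticate request.");
		return;
	}

	/* TRUE: allow polkit to interact with the user if needed */
	nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_NETWORK_CONTROL, TRUE);
}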
|
|
|
|
|
|
2018-08-08 11:33:31 +02:00
|
|
|
static gboolean
|
|
|
|
|
new_activation_allowed_for_connection (NMManager *self,
|
|
|
|
|
NMSettingsConnection *connection)
|
|
|
|
|
{
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
if (NM_IN_SET (_nm_connection_get_multi_connect (nm_settings_connection_get_connection (connection)),
|
2018-08-08 11:33:31 +02:00
|
|
|
NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE,
|
|
|
|
|
NM_CONNECTION_MULTI_CONNECT_MULTIPLE))
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
|
|
return !active_connection_find (self, connection, NULL,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
|
|
|
|
|
NULL);
|
|
|
|
|
}
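
The delegation rework described in the blame annotation above (NMSettingsConnection holding an NMSimpleConnection rather than implementing NMConnection itself) is the classic "has-a instead of is-a" move. Here is a minimal, non-GObject sketch with made-up names, only meant to illustrate the shape of the change:

/* Illustrative sketch only (made-up types): delegation instead of
 * inheritance. The wrapper owns a plain profile object and forwards to
 * it, mirroring nm_settings_connection_get_connection() in spirit. */
typedef struct {
	char *uuid;
	char *id;
} SimpleProfile;

typedef struct {
	SimpleProfile *profile;    /* delegate: "has-a", not "is-a" */
	char          *filename;   /* persistence state stays in the wrapper */
} StoredProfile;

static SimpleProfile *
stored_profile_get_profile (StoredProfile *self)
{
	/* callers that only need the settings get the small delegate, never
	 * the whole wrapper -- cheap to snapshot or copy later */
	return self->profile;
}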
|
|
|
|
|
|
2013-06-27 14:39:13 +02:00
|
|
|
/**
|
2013-11-02 10:40:58 -05:00
|
|
|
* get_existing_connection:
|
2013-06-27 14:39:13 +02:00
|
|
|
* @manager: #NMManager instance
|
|
|
|
|
* @device: #NMDevice instance
|
2014-06-20 20:13:14 +02:00
|
|
|
* @out_generated: (allow-none): returns TRUE if the connection was generated.
|
2013-06-27 14:39:13 +02:00
|
|
|
*
|
2013-11-02 10:40:58 -05:00
|
|
|
* Returns: a #NMSettingsConnection to be assumed by the device, or %NULL if
|
|
|
|
|
* the device does not support assuming existing connections.
|
2013-06-27 14:39:13 +02:00
|
|
|
*/
|
2015-07-14 16:53:24 +02:00
|
|
|
static NMSettingsConnection *
|
core: only assume connections that were managed in a previous run of NetworkManager
Before, we would have the concept of assumed connections, which is used
for (1) externally configured devices that NetworkManager should not
touch and (2) connections that NetworkManager should gracefully take
over after a restart (seamlessly, non-destructively).
The behavior was unclear and mixed. It wasn't clear whether the device
is in no-touch mode (1) or gracefully take-over (2).
Previous commits already introduced separate activation types EXTERNAL (1)
and ASSUME (2).
Also, previously, we would for both (1) and (2) try to find a matching
connection and use it. That doesn't make sense for either.
In the external case (1), we should not pretend that an existing connection
is active. Let's always create a new in-memory connection for these
cases. Note that this means, external devices now will always generate
a connection, instead of pretending an existing one is active.
For the assume case (2), we shall not use nm_utils_match_connection() to
guess which connection might be active. It can only be the one that was
active on a previous run of NetworkManager. So, use the information from
the state file and try to activate it. If that fails, it is not an
assume activation type. Note that this means we now most of the time
don't do ASSUME anymore; most of the time we do EXTERNAL activation.
That is because the state information is only available after a restart
of NetworkManager.
2017-03-08 08:45:11 +01:00
|
|
|
get_existing_connection (NMManager *self,
|
|
|
|
|
NMDevice *device,
|
|
|
|
|
gboolean *out_generated)
|
2013-06-27 14:39:13 +02:00
|
|
|
{
|
2016-03-02 11:38:26 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
gs_unref_object NMConnection *connection = NULL;
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *added;
|
2013-08-23 15:45:17 +02:00
|
|
|
GError *error = NULL;
|
2018-04-05 11:19:54 +02:00
|
|
|
gs_free_error GError *gen_error = NULL;
|
2014-06-18 20:17:57 +02:00
|
|
|
NMDevice *master = NULL;
|
|
|
|
|
int ifindex = nm_device_get_ifindex (device);
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *matched = NULL;
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take over. Mostly for restart of NetworkManager,
we activate a connection gracefully without going through a down-up
cycle. After the device reaches activated state, the device is
considered fully managed. For this only an existing, non volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non volatile, connection which
matches the device's configuration.
This patch lets NetworkManager assume connections also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
|
|
|
NMSettingsConnection *connection_checked = NULL;
|
manager: fix preserving assume state during activation
Originally, 850c977 "device: track system interface state in NMDevice"
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking the startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reason the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like, the connection
no longer matching), we clear the assume state, so that we only try as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
|
|
|
gboolean assume_state_guess_assume = FALSE;
|
|
|
|
|
const char *assume_state_connection_uuid = NULL;
|
2018-04-05 11:19:54 +02:00
|
|
|
gboolean maybe_later, only_by_uuid = FALSE;
|
2013-06-27 14:39:13 +02:00
|
|
|
|
2014-06-20 20:13:14 +02:00
|
|
|
if (out_generated)
|
|
|
|
|
*out_generated = FALSE;
|
|
|
|
|
|
2013-11-02 10:38:23 -05:00
|
|
|
nm_device_capture_initial_config (device);
|
|
|
|
|
|
2014-06-18 20:17:57 +02:00
|
|
|
if (ifindex) {
|
2017-09-29 15:11:33 +02:00
|
|
|
int master_ifindex = nm_platform_link_get_master (priv->platform, ifindex);
|
2014-06-18 20:17:57 +02:00
|
|
|
|
|
|
|
|
if (master_ifindex) {
|
2016-03-02 11:38:26 +01:00
|
|
|
master = nm_manager_get_device_by_ifindex (self, master_ifindex);
|
2014-06-18 20:17:57 +02:00
|
|
|
if (!master) {
|
2017-06-07 16:41:06 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "assume: don't assume because "
|
|
|
|
|
"cannot generate connection for slave before its master (%s/%d)",
|
2017-09-29 15:11:33 +02:00
|
|
|
nm_platform_link_get_name (priv->platform, master_ifindex), master_ifindex);
|
2014-06-18 20:17:57 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
if (!nm_device_get_act_request (master)) {
|
2017-06-07 16:41:06 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "assume: don't assume because "
|
|
|
|
|
"cannot generate connection for slave before master %s activates",
|
|
|
|
|
nm_device_get_iface (master));
|
2014-06-18 20:17:57 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
/* The core of the API is the nm_device_generate_connection() function, the
|
|
|
|
|
* update_connection() virtual method and the convenient connection_type
|
|
|
|
|
* class attribute. Subclasses supporting the new API must have
|
|
|
|
|
* update_connection() implemented, otherwise nm_device_generate_connection()
|
|
|
|
|
* returns NULL.
|
|
|
|
|
*/
|
2018-04-05 11:19:54 +02:00
|
|
|
connection = nm_device_generate_connection (device, master, &maybe_later, &gen_error);
|
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
if (!connection) {
|
2018-04-05 11:19:54 +02:00
|
|
|
if (maybe_later) {
|
|
|
|
|
/* The device can generate a connection, but it failed for now.
|
|
|
|
|
* Give it a chance to match a connection from the state file. */
|
|
|
|
|
only_by_uuid = TRUE;
|
|
|
|
|
} else {
|
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
nm_device_assume_state_reset (device);
|
2018-04-05 11:19:54 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "assume: cannot generate connection: %s",
|
|
|
|
|
gen_error->message);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
    }
manager: fix preserving assume state during activation
Originally, commit 850c977 "device: track system interface state in NMDevice"
intended that a connection can only be assumed initially, when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting the device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. This was then attempted
to be fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking the startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file, or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like the
connection no longer matching), we clear the assume state, so that we only
keep trying as long as assuming fails for internal reasons.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
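The pattern this commit message describes can be sketched roughly as follows. This is a hand-written illustration, not code from nm-manager.c: the nm_device_assume_state_get()/nm_device_assume_state_reset() calls correspond to the real accessors that also appear in the hunks below, while the AssumeResult enum and the try_assume() helper are invented placeholders for the matching and activation logic.
/* Hypothetical sketch of the assume-state handling described above; only the
 * nm_device_assume_state_*() calls correspond to the real API, the rest is
 * an invented placeholder. */
typedef enum {
    ASSUME_RESULT_OK,             /* connection was assumed */
    ASSUME_RESULT_FAIL_INTERNAL,  /* e.g. master not ready yet: keep trying */
    ASSUME_RESULT_FAIL_EXTERNAL,  /* e.g. profile no longer matches: give up */
} AssumeResult;

static void
recheck_assume_sketch (NMManager *self, NMDevice *device)
{
    gboolean assume_state_guess_assume = FALSE;
    const char *assume_state_connection_uuid = NULL;

    /* the assume state is attached to the device, so it survives
     * repeated recheck calls */
    nm_device_assume_state_get (device,
                                &assume_state_guess_assume,
                                &assume_state_connection_uuid);

    switch (try_assume (self, device,
                        assume_state_guess_assume,
                        assume_state_connection_uuid)) {
    case ASSUME_RESULT_FAIL_INTERNAL:
        /* keep the stored state so a later recheck can try again */
        break;
    case ASSUME_RESULT_OK:
    case ASSUME_RESULT_FAIL_EXTERNAL:
        /* success, or an external failure: the stored state is no longer
         * useful, clear it */
        nm_device_assume_state_reset (device);
        break;
    }
}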
    nm_device_assume_state_get (device,
                                &assume_state_guess_assume,
                                &assume_state_connection_uuid);
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
    /* Now we need to compare the generated connection to each configured
     * connection. The comparison function is the heart of the connection
     * assumption implementation and it must compare the connections very
     * carefully to sort out various corner cases. Also, the comparison is
     * not entirely symmetric.
     *
     * When no configured connection matches the generated connection, we keep
     * the generated connection instead.
     */
    if (   assume_state_connection_uuid
        && (connection_checked = nm_settings_get_connection_by_uuid (priv->settings, assume_state_connection_uuid))
        && new_activation_allowed_for_connection (self, connection_checked)
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
   on D-Bus
2) it interacts with NMSettings and contains functionality
   for tracking the profiles.
3) it is the base class of types like NMSKeyfileConnection and
   NMIfcfgConnection. These handle how the profile is persisted
   on disk.
4) it implements the NMConnection interface, to itself track the
   settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates keeping the actual profile.
Advantages:
- By delegating, there is a clearer separation of what
  NMSettingsConnection does. For example, in C we often required
  casts from NMSettingsConnection to NMConnection. NMConnection
  is a very trivial object with very little logic. When we have
  an NMConnection instance at hand, it's good to know that it is
  *only* that simple, instead of also being an entire
  NMSettingsConnection instance.
  The main purpose of this patch is to simplify the code by separating
  the NMConnection from the NMSettingsConnection. We should generally
  be aware whether we handle an NMSettingsConnection or a trivial
  NMConnection instance. Now, because NMSettingsConnection no longer
  "is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
  NMSimpleConnection instances whenever we need a real instance.
  In GLib, interfaces have a performance overhead that we needlessly
  pay all the time. With this change, we no longer require
  NMConnection to be an interface. Thus, in the future we could compile
  a version of libnm-core for the daemon where NMConnection is not an
  interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
  and copy-on-write.
  For example, when NMDevice needs a snapshot of the activated
  profile as the applied-connection, all it can do is clone the entire
  NMSettingsConnection as an NMSimpleConnection.
  Likewise, when we get an NMConnection instance and want to keep
  a reference to it, we cannot do that, because we never know
  who else references and modifies the instance.
  By separating NMSettingsConnection, we could in the future make
  NMConnection immutable and copy-on-write, to avoid all unnecessary
  clones.
2018-08-11 11:08:17 +02:00
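As a rough illustration of the delegation this commit describes (a hand-written sketch, not the real NMSettingsConnection layout), the settings object keeps a plain NMConnection as a member and hands it out through an accessor; nm_settings_connection_get_connection(), used in the code below, is the real counterpart of that accessor.
/* Minimal delegation sketch; the struct layout and names are illustrative
 * only and do not reproduce the actual NMSettingsConnection implementation. */
typedef struct {
    GObject parent;            /* the real object also "is-a" NMDBusObject */
    NMConnection *connection;  /* delegate: an NMSimpleConnection holding the profile */
} SketchSettingsConnection;

static NMConnection *
sketch_settings_connection_get_connection (SketchSettingsConnection *self)
{
    /* callers needing the profile data go through the delegate instead of
     * casting the settings object itself to NMConnection */
    return self->connection;
}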
        && nm_device_check_connection_compatible (device,
                                                  nm_settings_connection_get_connection (connection_checked),
                                                  NULL)) {

        if (connection) {
            NMConnection *con = nm_settings_connection_get_connection (connection_checked);

            if (nm_utils_match_connection ((NMConnection *[]) { con, NULL },
                                           connection,
                                           TRUE,
                                           nm_device_has_carrier (device),
                                           nm_device_get_route_metric (device, AF_INET),
                                           nm_device_get_route_metric (device, AF_INET6),
                                           NULL, NULL))
                matched = connection_checked;
        } else
            matched = connection_checked;
    }
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
   connection and pretend it's active. However, the device must not be
   touched by NetworkManager in any way.
b) assume, seamless take-over. Mostly for restart of NetworkManager,
   we activate a connection gracefully without going through a down-up
   cycle. After the device reaches activated state, the device is
   considered fully managed. For this, only an existing, non-volatile
   connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections:
- during first start (not after a restart), when there is no
  state file yet,
- and if we have an existing, non-volatile connection which
  matches the device's configuration.
This patch lets NetworkManager assume connections also on first start.
That is for example useful when handing over network configuration from
the initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
    if (!matched && only_by_uuid) {
        _LOG2D (LOGD_DEVICE, device, "assume: cannot generate connection: %s",
                gen_error->message);
        return NULL;
    }
    if (!matched && assume_state_guess_assume) {
        gs_free NMSettingsConnection **sett_conns = NULL;
        guint len, i, j;

        /* the state file doesn't indicate a connection UUID to assume. Search the
         * persistent connections for a matching candidate. */
        sett_conns = nm_manager_get_activatable_connections (self, FALSE, FALSE, &len);
        if (len > 0) {
            for (i = 0, j = 0; i < len; i++) {
                NMSettingsConnection *sett_conn = sett_conns[i];
                if (   sett_conn != connection_checked
                    && nm_device_check_connection_compatible (device,
                                                              nm_settings_connection_get_connection (sett_conn),
                                                              NULL))
                    sett_conns[j++] = sett_conn;
            }
            sett_conns[j] = NULL;
            len = j;
            if (len > 0) {
                gs_free NMConnection **conns = NULL;
                NMConnection *con;

                g_qsort_with_data (sett_conns, len, sizeof (sett_conns[0]),
                                   nm_settings_connection_cmp_timestamp_p_with_data, NULL);

                conns = nm_settings_connections_array_to_connections (sett_conns, len);

                con = nm_utils_match_connection (conns,
                                                 connection,
                                                 FALSE,
                                                 nm_device_has_carrier (device),
                                                 nm_device_get_route_metric (device, AF_INET),
                                                 nm_device_get_route_metric (device, AF_INET6),
                                                 NULL,
                                                 NULL);
                if (con) {
                    for (i = 0; i < len; i++) {
                        if (conns[i] == con) {
                            matched = sett_conns[i];
                            break;
                        }
                    }
                    nm_assert (matched);
                }
            }
core: only assume connections that were managed in a previous run of NetworkManager
Before, we had one concept of assumed connections, which was used both
for (1) externally configured devices that NetworkManager should not
touch and (2) connections that NetworkManager should gracefully take
over after a restart (seamlessly, non-destructively).
The behavior was unclear and mixed. It wasn't clear whether the device
is in no-touch mode (1) or graceful take-over mode (2).
Previous commits already introduced the separate activation types EXTERNAL (1)
and ASSUME (2).
Also, previously we would, for both (1) and (2), try to find a matching
connection and use it. That doesn't make sense for either.
In the external case (1), we should not pretend that an existing connection
is active. Let's always create a new in-memory connection for these
cases. Note that this means external devices now always generate
a connection, instead of pretending that an existing one is active.
For the assume case (2), we shall not use nm_utils_match_connection() to
guess which connection might be active. It can only be the one that was
active on a previous run of NetworkManager. So, use the information from
the state file and try to activate that. If that fails, it is not an
assume activation type. Note that this means we now mostly don't do
ASSUME anymore; most of the time we do EXTERNAL activation. That is
because the state information is only available after a restart of
NetworkManager.
2017-03-08 08:45:11 +01:00
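A condensed sketch of the decision this commit describes follows; the enum and the helper are invented for illustration and are not the real NMManager or NMActiveConnection symbols.
/* Hypothetical sketch only: choose between gracefully assuming a profile
 * and treating the device as externally configured. */
typedef enum {
    SKETCH_ACTIVATION_EXTERNAL,  /* don't touch the device, generate a volatile in-memory profile */
    SKETCH_ACTIVATION_ASSUME,    /* seamless take-over of a known profile */
} SketchActivationType;

static SketchActivationType
choose_activation_type (const char *state_file_uuid,
                        NMSettingsConnection *connection_from_state_file)
{
    /* only a profile recorded in the state file, i.e. one that was active
     * in a previous run of NetworkManager, qualifies for take-over */
    if (state_file_uuid && connection_from_state_file)
        return SKETCH_ACTIVATION_ASSUME;

    /* otherwise keep the external configuration and generate a new
     * in-memory connection for the device */
    return SKETCH_ACTIVATION_EXTERNAL;
}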
        }
    }
    if (matched) {
        _LOG2I (LOGD_DEVICE, device, "assume: will attempt to assume matching connection '%s' (%s)%s",
                nm_settings_connection_get_id (matched),
                nm_settings_connection_get_uuid (matched),
                assume_state_connection_uuid && nm_streq (assume_state_connection_uuid, nm_settings_connection_get_uuid (matched))
                    ? " (indicated)" : " (guessed)");
        nm_device_assume_state_reset (device);
return matched;
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-07 16:41:06 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "assume: generated connection '%s' (%s)",
|
|
|
|
|
nm_connection_get_id (connection),
|
|
|
|
|
nm_connection_get_uuid (connection));
|
2013-08-23 15:45:17 +02:00
|
|
|
|
2017-06-07 17:34:47 +02:00
|
|
|
nm_device_assume_state_reset (device);
|
|
|
|
|
|
2013-08-23 15:45:17 +02:00
|
|
|
added = nm_settings_add_connection (priv->settings, connection, FALSE, &error);
|
2017-06-07 16:41:06 +02:00
|
|
|
if (!added) {
|
|
|
|
|
_LOG2W (LOGD_SETTINGS, device, "assume: failure to save generated connection '%s': %s",
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_connection_get_id (connection),
|
2016-02-28 16:25:36 +01:00
|
|
|
error->message);
|
2017-06-07 16:41:06 +02:00
|
|
|
g_error_free (error);
|
|
|
|
|
return NULL;
|
2013-08-23 15:45:17 +02:00
|
|
|
}
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
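As a rough sketch of the delegation pattern described above; the types and fields below are simplified and hypothetical, and in the real code the accessor is nm_settings_connection_get_connection(), which is used further down in this file:

/* Hypothetical, simplified layout: the profile data lives in a separate,
 * trivial NMConnection (an NMSimpleConnection); callers reach it only
 * through an accessor, so the distinction between "settings object" and
 * "plain profile" stays visible.  Assumes <glib-object.h> and the libnm-core
 * NMConnection type. */
typedef struct {
    GObject       parent;      /* D-Bus export, flags, persistence, ... */
    NMConnection *connection;  /* owned NMSimpleConnection holding the settings */
} SettingsConnectionSketch;

static NMConnection *
settings_connection_sketch_get_connection (SettingsConnectionSketch *self)
{
    return self->connection;
}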
|
|
|
nm_settings_connection_set_flags (added,
|
2018-04-05 20:02:45 +02:00
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_NM_GENERATED |
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE,
|
2017-06-07 16:41:06 +02:00
|
|
|
TRUE);
|
|
|
|
|
NM_SET_OUT (out_generated, TRUE);
|
|
|
|
|
return added;
|
2013-06-27 14:39:13 +02:00
|
|
|
}
|
|
|
|
|
|
2014-06-20 20:13:14 +02:00
|
|
|
static gboolean
|
core: only assume connections that were managed in a previous run of NetworkManager
Before, we would have the concept of assumed connections, which is used
for (1) externally configured device that NetworkManager should not
touch and (2) connections that NetworkManager should gracefully take
over after a restart (seamlessly, non-destructively).
The behavior was unclear and mixed. It wasn't clear whether the device
is in no-touch mode (1) or gracefully take-over (2).
Previous commits already introduce separate activation types EXTERNAL (1)
and ASSUME (2).
Also, previously, we would for both (1) and (2) try to find a matching
connection and use it. That doesn't make sense for either.
In the external case (1), we should not pretend that an existing connection
is active. Let's always create a new in-memory connection for these
cases. Note that this means, external devices now will always generate
a connection, instead of pretending an existing one is active.
For the assume case (2), we shall not use nm_utils_match_connection() to
guess which connection might be active. It can only be the one that was
active on a previous run of NetworkManager. So, use the information from
the state file and try to activate it. If that fails, it is not an
assume activation type. Note that this means we now most of the time
don't do ASSUME anymore; most of the time we do EXTERNAL activation.
That is because the state information is only available after a restart
of NetworkManager.
2017-03-08 08:45:11 +01:00
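Condensed, the split between (1) and (2) comes down to a single flag; the activation-type values below are the real ones used later in recheck_assume_connection(), while the helper itself is only an illustration:

/* "generated" means no existing profile was found and a volatile in-memory
 * one was created: that is the external case (1).  Otherwise the profile
 * named by the state file can be assumed, case (2). */
static NMActivationType
choose_activation_type_sketch (gboolean generated)
{
    return generated ? NM_ACTIVATION_TYPE_EXTERNAL
                     : NM_ACTIVATION_TYPE_ASSUME;
}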
|
|
|
recheck_assume_connection (NMManager *self,
|
2017-06-07 17:34:47 +02:00
|
|
|
NMDevice *device)
|
2014-05-28 10:18:34 -04:00
|
|
|
{
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn;
|
2017-03-09 16:58:37 +01:00
|
|
|
gboolean was_unmanaged = FALSE;
|
|
|
|
|
gboolean generated = FALSE;
|
2014-08-01 22:46:49 +02:00
|
|
|
NMDeviceState state;
|
2014-05-28 10:18:34 -04:00
|
|
|
|
2015-12-04 17:17:47 +01:00
|
|
|
g_return_val_if_fail (NM_IS_MANAGER (self), FALSE);
|
|
|
|
|
g_return_val_if_fail (NM_IS_DEVICE (device), FALSE);
|
|
|
|
|
|
2017-06-07 17:34:47 +02:00
|
|
|
if (!nm_device_get_managed (device, FALSE)) {
|
|
|
|
|
nm_device_assume_state_reset (device);
|
2017-06-07 13:30:08 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "assume: don't assume because %s", "not managed");
|
2014-06-20 20:13:14 +02:00
|
|
|
return FALSE;
|
2017-06-07 17:34:47 +02:00
|
|
|
}
|
2014-05-28 10:18:34 -04:00
|
|
|
|
2014-12-03 14:24:18 -06:00
|
|
|
state = nm_device_get_state (device);
|
2017-06-07 17:34:47 +02:00
|
|
|
if (state > NM_DEVICE_STATE_DISCONNECTED) {
|
|
|
|
|
nm_device_assume_state_reset (device);
|
2017-06-07 13:30:08 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "assume: don't assume due to device state %s",
|
|
|
|
|
nm_device_state_to_str (state));
|
2017-03-13 15:34:14 +01:00
|
|
|
return FALSE;
|
2017-06-07 17:34:47 +02:00
|
|
|
}
|
2017-03-13 15:34:14 +01:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = get_existing_connection (self, device, &generated);
|
2017-06-07 16:41:06 +02:00
|
|
|
/* log no reason. get_existing_connection() already does it. */
|
2018-08-11 11:08:17 +02:00
|
|
|
if (!sett_conn)
|
2014-06-20 20:13:14 +02:00
|
|
|
return FALSE;
|
2017-03-09 16:58:37 +01:00
|
|
|
|
2017-06-01 22:04:26 +02:00
|
|
|
nm_device_sys_iface_state_set (device,
|
|
|
|
|
generated
|
|
|
|
|
? NM_DEVICE_SYS_IFACE_STATE_EXTERNAL
|
|
|
|
|
: NM_DEVICE_SYS_IFACE_STATE_ASSUME);
|
2017-03-13 15:34:14 +01:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
/* Move device to DISCONNECTED to activate the connection */
|
2014-08-01 22:46:49 +02:00
|
|
|
if (state == NM_DEVICE_STATE_UNMANAGED) {
|
2014-05-28 10:18:34 -04:00
|
|
|
was_unmanaged = TRUE;
|
|
|
|
|
nm_device_state_changed (device,
|
|
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED);
|
|
|
|
|
}
|
2017-03-09 16:58:37 +01:00
|
|
|
if (nm_device_get_state (device) == NM_DEVICE_STATE_UNAVAILABLE) {
|
|
|
|
|
nm_device_state_changed (device,
|
|
|
|
|
NM_DEVICE_STATE_DISCONNECTED,
|
|
|
|
|
NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED);
|
|
|
|
|
}
|
2014-05-28 10:18:34 -04:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
g_return_val_if_fail (nm_device_get_state (device) >= NM_DEVICE_STATE_DISCONNECTED, FALSE);
|
|
|
|
|
|
|
|
|
|
{
|
|
|
|
|
gs_unref_object NMActiveConnection *active = NULL;
|
|
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
|
|
|
|
NMActiveConnection *master_ac;
|
|
|
|
|
GError *error = NULL;
|
|
|
|
|
|
|
|
|
|
subject = nm_auth_subject_new_internal ();
|
2018-04-12 11:32:18 +02:00
|
|
|
active = _new_active_connection (self,
|
|
|
|
|
FALSE,
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn,
|
|
|
|
|
NULL,
|
2018-04-12 11:32:18 +02:00
|
|
|
NULL,
|
|
|
|
|
NULL,
|
|
|
|
|
device,
|
|
|
|
|
subject,
|
2017-03-09 16:58:37 +01:00
|
|
|
generated ? NM_ACTIVATION_TYPE_EXTERNAL : NM_ACTIVATION_TYPE_ASSUME,
|
2018-04-19 13:24:11 +02:00
|
|
|
generated ? NM_ACTIVATION_REASON_EXTERNAL : NM_ACTIVATION_REASON_ASSUME,
|
2017-03-09 16:58:37 +01:00
|
|
|
&error);
|
|
|
|
|
|
|
|
|
|
if (!active) {
|
2017-06-07 13:30:08 +02:00
|
|
|
_LOGW (LOGD_DEVICE, "assume: assumed connection %s failed to activate: %s",
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_dbus_object_get_path (NM_DBUS_OBJECT (sett_conn)),
|
2017-03-09 16:58:37 +01:00
|
|
|
error->message);
|
|
|
|
|
g_error_free (error);
|
|
|
|
|
|
|
|
|
|
if (was_unmanaged) {
|
|
|
|
|
nm_device_state_changed (device,
|
|
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_CONFIG_FAILED);
|
|
|
|
|
}
|
2014-06-20 20:13:14 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
if (generated) {
|
2017-06-07 13:30:08 +02:00
|
|
|
_LOG2D (LOGD_DEVICE, device, "assume: deleting generated connection after assuming failed");
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_delete (sett_conn, NULL);
|
2017-03-13 15:34:14 +01:00
|
|
|
} else {
|
|
|
|
|
if (nm_device_sys_iface_state_get (device) == NM_DEVICE_SYS_IFACE_STATE_ASSUME)
|
|
|
|
|
nm_device_sys_iface_state_set (device, NM_DEVICE_SYS_IFACE_STATE_EXTERNAL);
|
2017-03-09 16:58:37 +01:00
|
|
|
}
|
|
|
|
|
return FALSE;
|
2014-05-28 10:18:34 -04:00
|
|
|
}
|
2017-03-09 16:58:37 +01:00
|
|
|
|
|
|
|
|
/* If the device is a slave or VLAN, find the master ActiveConnection */
|
|
|
|
|
master_ac = NULL;
|
2018-08-11 11:08:17 +02:00
|
|
|
if ( find_master (self,
|
|
|
|
|
nm_settings_connection_get_connection (sett_conn),
|
|
|
|
|
device,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL,
|
|
|
|
|
&master_ac,
|
|
|
|
|
NULL)
|
|
|
|
|
&& master_ac)
|
2017-03-09 16:58:37 +01:00
|
|
|
nm_active_connection_set_master (active, master_ac);
|
|
|
|
|
|
|
|
|
|
active_connection_add (self, active);
|
|
|
|
|
nm_device_queue_activation (device, NM_ACT_REQUEST (active));
|
2014-05-28 10:18:34 -04:00
|
|
|
}
|
2014-06-20 20:13:14 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
return TRUE;
|
2014-05-28 10:18:34 -04:00
|
|
|
}
|
|
|
|
|
|
2015-12-04 17:17:47 +01:00
|
|
|
static void
|
2017-06-07 17:34:47 +02:00
|
|
|
recheck_assume_connection_cb (NMManager *self, NMDevice *device)
|
2015-12-04 17:17:47 +01:00
|
|
|
{
|
2017-06-07 17:34:47 +02:00
|
|
|
recheck_assume_connection (self, device);
|
2015-12-04 17:17:47 +01:00
|
|
|
}
|
|
|
|
|
|
2016-12-26 11:12:39 +01:00
|
|
|
static void
|
|
|
|
|
device_ifindex_changed (NMDevice *device,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
_parent_notify_changed (self, device, FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-12 13:27:14 -05:00
|
|
|
static void
|
|
|
|
|
device_ip_iface_changed (NMDevice *device,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and consumed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
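A minimal sketch of the embedded-list idea; the structs are invented, the c_list_link_tail()/c_list_unlink() calls are the standard c-list API, and c_list_for_each_entry() is used in this file below:

#include "c-list.h"  /* header location assumed */

/* Invented, minimal illustration: the link node is embedded in the tracked
 * object, so no extra allocation per element is needed and unlinking is O(1)
 * without access to the list head. */
typedef struct {
    CList devices_lst;  /* embedded link node */
    char *iface;
} DeviceSketch;

typedef struct {
    CList devices_lst_head;  /* list head owned by the manager */
} ManagerSketch;

static void
manager_sketch_track (ManagerSketch *m, DeviceSketch *d)
{
    c_list_link_tail (&m->devices_lst_head, &d->devices_lst);
}

static void
manager_sketch_untrack (DeviceSketch *d)
{
    c_list_unlink (&d->devices_lst);  /* the node knows its neighbors */
}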
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2014-06-12 13:27:14 -05:00
|
|
|
const char *ip_iface = nm_device_get_ip_iface (device);
|
2017-10-12 14:53:43 +02:00
|
|
|
NMDeviceType device_type = nm_device_get_device_type (device);
|
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *candidate;
|
2014-06-12 13:27:14 -05:00
|
|
|
|
|
|
|
|
/* Remove NMDevice objects that are actually child devices of others,
|
|
|
|
|
* when the other device finally knows its IP interface name. For example,
|
|
|
|
|
* remove the PPP interface that's a child of a WWAN device, since it's
|
|
|
|
|
* not really a standalone NMDevice.
|
|
|
|
|
*/
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2014-06-12 13:27:14 -05:00
|
|
|
if ( candidate != device
|
2014-10-15 21:17:45 -05:00
|
|
|
&& g_strcmp0 (nm_device_get_iface (candidate), ip_iface) == 0
|
2017-10-12 14:53:43 +02:00
|
|
|
&& nm_device_get_device_type (candidate) == device_type
|
2014-10-15 21:17:45 -05:00
|
|
|
&& nm_device_is_real (candidate)) {
|
2014-10-13 11:58:26 -05:00
|
|
|
remove_device (self, candidate, FALSE, FALSE);
|
2014-06-12 13:27:14 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-07 17:54:38 +01:00
|
|
|
static void
|
|
|
|
|
device_iface_changed (NMDevice *device,
|
|
|
|
|
GParamSpec *pspec,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
/* Virtual connections may refer to the new device name as
|
|
|
|
|
* parent device, retry to activate them.
|
|
|
|
|
*/
|
|
|
|
|
retry_connections_for_parent_device (self, device);
|
|
|
|
|
}
|
|
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
static void
_emit_device_added_removed (NMManager *self,
                            NMDevice *device,
                            gboolean is_added)
{
	nm_dbus_object_emit_signal (NM_DBUS_OBJECT (self),
	                            &interface_info_manager,
	                            is_added
	                              ? &signal_info_device_added
	                              : &signal_info_device_removed,
	                            "(o)",
	                            nm_dbus_object_get_path (NM_DBUS_OBJECT (device)));

	g_signal_emit (self,
	               signals[is_added ? DEVICE_ADDED : DEVICE_REMOVED],
	               0,
	               device);

	_notify (self, PROP_DEVICES);
}
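
/* "notify::real" handler: re-announce @device as added or removed, depending
 * on whether it is realized now. */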
static void
device_realized (NMDevice *device,
                 GParamSpec *pspec,
                 NMManager *self)
{
	_emit_device_added_removed (self, device, nm_device_is_real (device));
}
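
/* Recompute the manager-wide connectivity state as the best state across all
 * devices; when it changes, update the manager state, notify PROP_CONNECTIVITY
 * and inform the dispatcher. */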
static void
device_connectivity_changed (NMDevice *device,
                             GParamSpec *pspec,
                             NMManager *self)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMConnectivityState best_state = NM_CONNECTIVITY_UNKNOWN;
	NMConnectivityState state;
	NMDevice *dev;

	best_state = nm_device_get_connectivity_state (device);
	if (best_state < NM_CONNECTIVITY_FULL) {
		c_list_for_each_entry (dev, &priv->devices_lst_head, devices_lst) {
			state = nm_device_get_connectivity_state (dev);
			if (state <= best_state)
				continue;
			best_state = state;
			if (best_state >= NM_CONNECTIVITY_FULL) {
				/* it doesn't get better than this. */
				break;
			}
		}
	}

	nm_assert (best_state <= NM_CONNECTIVITY_FULL);

	if (best_state != priv->connectivity_state) {
		priv->connectivity_state = best_state;

		_LOGD (LOGD_CORE, "connectivity checking indicates %s",
		       nm_connectivity_state_to_string (priv->connectivity_state));

		nm_manager_update_state (self);
		_notify (self, PROP_CONNECTIVITY);
		nm_dispatcher_call_connectivity (priv->connectivity_state, NULL, NULL, NULL);
	}
}
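
/* Finish realizing @device: unmanaged devices only get their assume-state
 * reset; for managed devices, try to assume an existing connection and, if
 * that fails, put the device into UNAVAILABLE with reason NOW_MANAGED and
 * recheck auto-activation. */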
static void
_device_realize_finish (NMManager *self,
                        NMDevice *device,
                        const NMPlatformLink *plink)
{
	g_return_if_fail (NM_IS_MANAGER (self));
	g_return_if_fail (NM_IS_DEVICE (device));

	nm_device_realize_finish (device, plink);

	if (!nm_device_get_managed (device, FALSE)) {
		nm_device_assume_state_reset (device);
		return;
	}

	if (recheck_assume_connection (self, device))
		return;

	/* We failed to assume a connection for the managed device, so the device
	 * is still unavailable. Set the UNAVAILABLE state again, this time with
	 * reason NOW_MANAGED. */
	nm_device_state_changed (device,
	                         NM_DEVICE_STATE_UNAVAILABLE,
	                         NM_DEVICE_STATE_REASON_NOW_MANAGED);
	nm_device_emit_recheck_auto_activate (device);
}

/**
 * add_device:
 * @self: the #NMManager
 * @device: the #NMDevice to add
 * @error: (out): the #GError
 *
 * If successful, this function increases the reference count of @device.
 * Callers should decrease the reference count.
 */
static gboolean
add_device (NMManager *self, NMDevice *device, GError **error)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	const char *iface, *type_desc;
	RfKillType rtype;
	GSList *iter, *remove = NULL;
	int ifindex;
	const char *dbus_path;
	NMDevice *candidate;

	/* No duplicates */
	ifindex = nm_device_get_ifindex (device);
	if (ifindex > 0 && nm_manager_get_device_by_ifindex (self, ifindex)) {
		g_set_error (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_FAILED,
		             "A device with ifindex %d already exists", ifindex);
		return FALSE;
	}

	/* Remove existing devices owned by the new device; eg remove ethernet
	 * ports that are owned by a WWAN modem, since udev may announce them
	 * before the modem is fully discovered.
	 *
	 * FIXME: use parent/child device relationships instead of removing
	 * the child NMDevice entirely
	 */
	c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
		if (   nm_device_is_real (candidate)
		    && (iface = nm_device_get_ip_iface (candidate))
		    && nm_device_owns_iface (device, iface))
			remove = g_slist_prepend (remove, candidate);
	}
	for (iter = remove; iter; iter = iter->next)
		remove_device (self, NM_DEVICE (iter->data), FALSE, FALSE);
	g_slist_free (remove);

	g_object_ref (device);

	nm_assert (c_list_is_empty (&device->devices_lst));
	c_list_link_tail (&priv->devices_lst_head, &device->devices_lst);
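
	/* Connect the signal handlers through which the manager tracks this
	 * device's state and property changes. */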
	g_signal_connect (device, NM_DEVICE_STATE_CHANGED,
	                  G_CALLBACK (manager_device_state_changed),
	                  self);

	g_signal_connect (device, NM_DEVICE_AUTH_REQUEST,
	                  G_CALLBACK (device_auth_request_cb),
	                  self);

	g_signal_connect (device, NM_DEVICE_REMOVED,
	                  G_CALLBACK (device_removed_cb),
	                  self);

	g_signal_connect_data (device, NM_DEVICE_RECHECK_ASSUME,
	                       G_CALLBACK (recheck_assume_connection_cb),
	                       self, NULL, G_CONNECT_SWAPPED);

	g_signal_connect (device, "notify::" NM_DEVICE_IP_IFACE,
	                  G_CALLBACK (device_ip_iface_changed),
	                  self);

	g_signal_connect (device, "notify::" NM_DEVICE_IFINDEX,
	                  G_CALLBACK (device_ifindex_changed),
	                  self);

	g_signal_connect (device, "notify::" NM_DEVICE_IFACE,
	                  G_CALLBACK (device_iface_changed),
	                  self);

	g_signal_connect (device, "notify::" NM_DEVICE_REAL,
	                  G_CALLBACK (device_realized),
	                  self);

	g_signal_connect (device, "notify::" NM_DEVICE_IP4_CONNECTIVITY,
	                  G_CALLBACK (device_connectivity_changed),
	                  self);

	g_signal_connect (device, "notify::" NM_DEVICE_IP6_CONNECTIVITY,
	                  G_CALLBACK (device_connectivity_changed),
	                  self);

	if (priv->startup) {
		g_signal_connect (device, "notify::" NM_DEVICE_HAS_PENDING_ACTION,
		                  G_CALLBACK (device_has_pending_action_changed),
		                  self);
	}

	/* Update global rfkill state for this device type with the device's
	 * rfkill state, and then set this device's rfkill state based on the
	 * global state.
	 */
	rtype = nm_device_get_rfkill_type (device);
	if (rtype != RFKILL_TYPE_UNKNOWN) {
		nm_manager_rfkill_update (self, rtype);
		nm_device_set_enabled (device, radio_enabled_for_type (self, rtype, TRUE));
	}

	iface = nm_device_get_iface (device);
	g_assert (iface);
	type_desc = nm_device_get_type_desc (device);
	g_assert (type_desc);

	nm_device_set_unmanaged_by_user_settings (device);

	nm_device_set_unmanaged_flags (device,
	                               NM_UNMANAGED_SLEEPING,
	                               manager_sleeping (self));
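
	/* Export the device on D-Bus, register it with the settings instance and
	 * announce it internally. */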
	dbus_path = nm_dbus_object_export (NM_DBUS_OBJECT (device));
	_LOG2I (LOGD_DEVICE, device, "new %s device (%s)", type_desc, dbus_path);

	nm_settings_device_added (priv->settings, device);
	g_signal_emit (self, signals[INTERNAL_DEVICE_ADDED], 0, device);
	_notify (self, PROP_ALL_DEVICES);

	_parent_notify_changed (self, device, FALSE);

	return TRUE;
}

/*****************************************************************************/
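
/* Callback invoked when an NMDeviceFactory announces a newly created device. */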
static void
|
|
|
|
|
factory_device_added_cb (NMDeviceFactory *factory,
|
|
|
|
|
NMDevice *device,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is briefly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the set flags but also on the explicitly unset flags.
In a way, this is similar to before, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times), this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
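(Purely illustrative sketch of the "one flag can render another flag
ineffective" idea; the enum and helper below are made up and do not
reflect the actual NMDevice flag handling.)

typedef enum {
	EX_UNMANAGED_EXTERNAL_DOWN = (1 << 0),
} ExUnmanagedFlags;

static gboolean
ex_device_is_unmanaged (ExUnmanagedFlags flags, gboolean user_explicitly_managed)
{
	/* EXTERNAL_DOWN marks the device unmanaged, but only as long as the
	 * user did not explicitly manage it; that explicit user decision
	 * renders the EXTERNAL_DOWN flag ineffective. */
	if (user_explicitly_managed)
		return FALSE;
	return (flags & EX_UNMANAGED_EXTERNAL_DOWN) != 0;
}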
|
|
|
NMManager *self = user_data;
|
2014-09-05 08:50:02 -05:00
|
|
|
GError *error = NULL;
|
|
|
|
|
|
2015-09-15 15:35:16 +02:00
|
|
|
g_return_if_fail (NM_IS_MANAGER (self));
|
|
|
|
|
|
2016-09-26 14:45:35 +02:00
|
|
|
if (nm_device_realize_start (device,
|
|
|
|
|
NULL,
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons, the device cannot be assumed yet.
Fix that by attaching the assume state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like the connection
no longer matching), we clear the assume state, so that we keep retrying only
as long as the failures are for internal reasons.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
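(Illustrative sketch only of the retry/clear logic described above; all
names below are made up and this is not the actual nm-manager code.)

typedef struct {
	char *assume_uuid;   /* hypothetical per-device assume state */
} ExDevice;

gboolean ex_try_assume (ExDevice *device, const char *uuid);    /* placeholder */
gboolean ex_failure_was_external (ExDevice *device);            /* placeholder */

static void
ex_recheck_assume (ExDevice *device)
{
	if (!device->assume_uuid)
		return;   /* nothing (left) to assume */

	if (!ex_try_assume (device, device->assume_uuid)) {
		if (ex_failure_was_external (device)) {
			/* external failure (e.g. the connection no longer matches):
			 * clear the state so we stop retrying. */
			g_clear_pointer (&device->assume_uuid, g_free);
		}
		/* internal failure: keep the state and retry on the next recheck. */
	}
}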
|
|
|
FALSE, /* assume_state_guess_assume */
|
|
|
|
|
NULL, /* assume_state_connection_uuid */
|
|
|
|
|
FALSE, /* set_nm_owned */
|
2016-09-26 14:45:35 +02:00
|
|
|
NM_UNMAN_FLAG_OP_FORGET,
|
|
|
|
|
NULL,
|
|
|
|
|
&error)) {
|
2015-09-15 15:35:16 +02:00
|
|
|
add_device (self, device, NULL);
|
2017-06-07 17:34:47 +02:00
|
|
|
_device_realize_finish (self, device, NULL);
|
2017-09-13 18:38:59 +02:00
|
|
|
retry_connections_for_parent_device (self, device);
|
2014-09-24 16:58:07 -05:00
|
|
|
} else {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG2W (LOGD_DEVICE, device, "failed to realize device: %s", error->message);
|
2014-09-05 08:50:02 -05:00
|
|
|
g_error_free (error);
|
|
|
|
|
}
|
2014-02-09 10:22:19 -06:00
|
|
|
}
|
2012-02-05 23:18:32 -06:00
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
static gboolean
|
|
|
|
|
factory_component_added_cb (NMDeviceFactory *factory,
|
|
|
|
|
GObject *component,
|
|
|
|
|
gpointer user_data)
|
2012-02-05 23:18:32 -06:00
|
|
|
{
|
2016-04-28 14:10:06 +02:00
|
|
|
NMManager *self = user_data;
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases a more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and consumed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
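(A minimal sketch of the embedded-CList pattern described above; the ExDevice
struct and the helpers are made up, while the c-list calls are the same ones
used by the code below, e.g. c_list_for_each_entry().)

typedef struct {
	CList devices_lst;   /* link node embedded in the tracked object */
	int   ifindex;
} ExDevice;

static void
ex_track_device (CList *devices_lst_head, ExDevice *device)
{
	/* O(1) append; no separate link-node allocation is needed. */
	c_list_link_tail (devices_lst_head, &device->devices_lst);
}

static void
ex_untrack_device (ExDevice *device)
{
	/* O(1) unlink, without needing access to the list head. */
	c_list_unlink (&device->devices_lst);
}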
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMDevice *device;
|
2015-11-24 16:43:18 +01:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_device_notify_component_added (device, component))
|
2015-11-24 16:43:18 +01:00
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
return FALSE;
|
2012-02-05 23:18:32 -06:00
|
|
|
}
|
|
|
|
|
|
2014-09-17 14:17:30 -05:00
|
|
|
static void
|
|
|
|
|
_register_device_factory (NMDeviceFactory *factory, gpointer user_data)
|
2014-09-05 15:05:40 -05:00
|
|
|
{
|
2014-09-17 14:17:30 -05:00
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
2014-09-05 15:05:40 -05:00
|
|
|
|
|
|
|
|
g_signal_connect (factory,
|
|
|
|
|
NM_DEVICE_FACTORY_DEVICE_ADDED,
|
|
|
|
|
G_CALLBACK (factory_device_added_cb),
|
|
|
|
|
self);
|
|
|
|
|
g_signal_connect (factory,
|
|
|
|
|
NM_DEVICE_FACTORY_COMPONENT_ADDED,
|
|
|
|
|
G_CALLBACK (factory_component_added_cb),
|
|
|
|
|
self);
|
2012-02-05 23:18:32 -06:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2014-09-05 15:57:40 -05:00
|
|
|
|
2009-01-19 11:01:00 +02:00
|
|
|
static void
|
2014-03-07 19:04:38 +01:00
|
|
|
platform_link_added (NMManager *self,
|
|
|
|
|
int ifindex,
|
2016-09-23 17:36:21 +02:00
|
|
|
const NMPlatformLink *plink,
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless takeover. Mostly for a restart of NetworkManager,
we activate a connection gracefully without going through a down-up
cycle. After the device reaches activated state, the device is
considered fully managed. For this, only an existing, non-volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections:
- during first start (not after a restart), when there is no
state file yet,
- and if we have an existing, non-volatile connection which
matches the device's configuration.
This patch lets NetworkManager assume connections also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
|
|
|
gboolean guess_assume,
|
2016-09-23 17:36:21 +02:00
|
|
|
const NMConfigDeviceStateData *dev_state)
|
2009-01-19 11:01:00 +02:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2014-09-17 14:17:30 -05:00
|
|
|
NMDeviceFactory *factory;
|
2012-02-10 12:21:10 -06:00
|
|
|
NMDevice *device = NULL;
|
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *candidate;
|
2009-01-19 11:01:00 +02:00
|
|
|
|
2013-05-24 17:30:31 -03:00
|
|
|
g_return_if_fail (ifindex > 0);
|
2012-03-06 11:38:03 -06:00
|
|
|
|
2014-09-05 13:15:44 -05:00
|
|
|
if (nm_manager_get_device_by_ifindex (self, ifindex))
|
2013-05-24 17:30:31 -03:00
|
|
|
return;
|
2009-01-19 11:01:00 +02:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* Let unrealized devices try to realize themselves with the link */
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2014-10-15 21:17:45 -05:00
|
|
|
gboolean compatible = TRUE;
|
2016-04-04 19:42:04 +02:00
|
|
|
gs_free_error GError *error = NULL;
|
2014-10-09 12:42:29 -05:00
|
|
|
|
2017-10-10 11:14:05 +02:00
|
|
|
if (nm_device_get_link_type (candidate) != plink->type)
|
|
|
|
|
continue;
|
|
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
if (strcmp (nm_device_get_iface (candidate), plink->name))
|
|
|
|
|
continue;
|
2014-10-09 12:42:29 -05:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
if (nm_device_is_real (candidate)) {
|
2018-08-02 17:45:09 +02:00
|
|
|
/* There's already a realized device with the link's name
|
|
|
|
|
* and a different ifindex.
|
2014-10-15 21:17:45 -05:00
|
|
|
*/
|
2018-08-02 17:45:09 +02:00
|
|
|
if (nm_device_get_ifindex (candidate) <= 0)
|
|
|
|
|
nm_device_update_from_platform_link (candidate, plink);
|
|
|
|
|
else {
|
|
|
|
|
/* The ifindex of a device can't be changed after
|
|
|
|
|
* initialization because it is used as a key by
|
|
|
|
|
* the dns-manager.
|
|
|
|
|
*/
|
|
|
|
|
_LOGD (LOGD_DEVICE, "(%s): removing old device %p after ifindex change from %d to %d",
|
|
|
|
|
plink->name, candidate, nm_device_get_ifindex (candidate), ifindex);
|
|
|
|
|
remove_device (self, candidate, FALSE, TRUE);
|
|
|
|
|
goto add;
|
|
|
|
|
}
|
2014-10-15 21:17:45 -05:00
|
|
|
return;
|
2016-09-26 14:45:35 +02:00
|
|
|
} else if (nm_device_realize_start (candidate,
|
|
|
|
|
plink,
|
2017-06-07 17:34:47 +02:00
|
|
|
FALSE, /* assume_state_guess_assume */
|
|
|
|
|
NULL, /* assume_state_connection_uuid */
|
|
|
|
|
FALSE, /* set_nm_owned */
|
2016-09-26 14:45:35 +02:00
|
|
|
NM_UNMAN_FLAG_OP_FORGET,
|
|
|
|
|
&compatible,
|
|
|
|
|
&error)) {
|
2017-06-07 17:34:47 +02:00
|
|
|
_device_realize_finish (self, candidate, plink);
|
2014-09-24 16:58:07 -05:00
|
|
|
return;
|
|
|
|
|
}
|
2014-10-09 12:42:29 -05:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_DEVICE, "(%s): failed to realize from plink: '%s'",
|
|
|
|
|
plink->name, error->message);
|
2014-10-09 12:42:29 -05:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* Try next unrealized device */
|
2014-09-24 16:58:07 -05:00
|
|
|
}
|
|
|
|
|
|
2018-08-02 17:45:09 +02:00
|
|
|
add:
|
2014-09-17 14:17:30 -05:00
|
|
|
/* Try registered device factories */
|
|
|
|
|
factory = nm_device_factory_manager_find_factory_for_link_type (plink->type);
|
|
|
|
|
if (factory) {
|
2015-05-06 09:53:44 -05:00
|
|
|
gboolean ignore = FALSE;
|
2016-04-04 19:42:04 +02:00
|
|
|
gs_free_error GError *error = NULL;
|
2015-05-06 09:53:44 -05:00
|
|
|
|
2014-09-05 08:50:02 -05:00
|
|
|
device = nm_device_factory_create_device (factory, plink->name, plink, NULL, &ignore, &error);
|
2014-09-17 14:17:30 -05:00
|
|
|
if (!device) {
|
2015-05-06 09:53:44 -05:00
|
|
|
if (!ignore) {
|
2016-10-06 21:28:40 +02:00
|
|
|
_LOGW (LOGD_PLATFORM, "%s: factory failed to create device: %s",
|
2016-03-02 11:38:26 +01:00
|
|
|
plink->name, error->message);
|
2016-04-12 10:19:21 +02:00
|
|
|
} else {
|
2016-10-06 21:28:40 +02:00
|
|
|
_LOGD (LOGD_PLATFORM, "%s: factory failed to create device: %s",
|
2016-04-12 10:19:21 +02:00
|
|
|
plink->name, error->message);
|
2015-05-06 09:53:44 -05:00
|
|
|
}
|
2012-02-05 23:18:32 -06:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (device == NULL) {
|
2016-09-26 13:26:15 +02:00
|
|
|
gboolean nm_plugin_missing = FALSE;
|
|
|
|
|
|
2013-10-22 17:11:24 +02:00
|
|
|
switch (plink->type) {
|
2016-06-14 11:19:15 -05:00
|
|
|
case NM_LINK_TYPE_WWAN_NET:
|
2015-05-06 09:53:44 -05:00
|
|
|
case NM_LINK_TYPE_BNEP:
|
2014-05-09 10:30:04 -05:00
|
|
|
case NM_LINK_TYPE_OLPC_MESH:
|
2014-06-27 17:02:23 +02:00
|
|
|
case NM_LINK_TYPE_TEAM:
|
2014-05-09 10:30:04 -05:00
|
|
|
case NM_LINK_TYPE_WIFI:
|
2016-10-06 21:28:40 +02:00
|
|
|
_LOGI (LOGD_PLATFORM, "(%s): '%s' plugin not available; creating generic device",
|
2016-03-02 11:38:26 +01:00
|
|
|
plink->name, nm_link_type_to_string (plink->type));
|
2015-04-14 14:43:31 +02:00
|
|
|
nm_plugin_missing = TRUE;
|
2014-05-09 10:30:04 -05:00
|
|
|
/* fall through */
|
2013-04-25 15:46:39 -04:00
|
|
|
default:
|
2016-09-26 13:26:15 +02:00
|
|
|
device = nm_device_generic_new (plink, nm_plugin_missing);
|
2013-04-25 15:46:39 -04:00
|
|
|
break;
|
2013-04-17 10:41:15 -04:00
|
|
|
}
|
2012-02-05 23:18:32 -06:00
|
|
|
}
|
|
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
if (device) {
|
2016-04-04 19:42:04 +02:00
|
|
|
gs_free_error GError *error = NULL;
|
2016-09-26 14:51:23 +02:00
|
|
|
NMUnmanFlagOp unmanaged_user_explicit = NM_UNMAN_FLAG_OP_FORGET;
|
|
|
|
|
|
|
|
|
|
if (dev_state) {
|
|
|
|
|
switch (dev_state->managed) {
|
|
|
|
|
case NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_MANAGED:
|
|
|
|
|
unmanaged_user_explicit = NM_UNMAN_FLAG_OP_SET_MANAGED;
|
|
|
|
|
break;
|
|
|
|
|
case NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNMANAGED:
|
|
|
|
|
unmanaged_user_explicit = NM_UNMAN_FLAG_OP_SET_UNMANAGED;
|
|
|
|
|
break;
|
|
|
|
|
case NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNKNOWN:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-04 19:42:04 +02:00
|
|
|
|
2016-09-26 14:45:35 +02:00
|
|
|
if (nm_device_realize_start (device,
|
|
|
|
|
plink,
|
2017-06-07 17:34:47 +02:00
|
|
|
guess_assume,
|
|
|
|
|
dev_state ? dev_state->connection_uuid : NULL,
|
2017-06-07 22:11:50 +02:00
|
|
|
dev_state ? (dev_state->nm_owned == 1) : FALSE,
|
2016-09-26 14:51:23 +02:00
|
|
|
unmanaged_user_explicit,
|
2016-09-26 14:45:35 +02:00
|
|
|
NULL,
|
|
|
|
|
&error)) {
|
2015-12-08 14:51:56 +01:00
|
|
|
add_device (self, device, NULL);
|
2017-06-07 17:34:47 +02:00
|
|
|
_device_realize_finish (self, device, plink);
|
2017-09-13 18:38:59 +02:00
|
|
|
retry_connections_for_parent_device (self, device);
|
2014-09-24 16:58:07 -05:00
|
|
|
} else {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGW (LOGD_DEVICE, "%s: failed to realize device: %s",
|
|
|
|
|
plink->name, error->message);
|
2015-08-25 16:10:40 +02:00
|
|
|
}
|
2014-02-09 10:22:19 -06:00
|
|
|
g_object_unref (device);
|
|
|
|
|
}
|
2009-01-19 11:01:00 +02:00
|
|
|
}
|
|
|
|
|
|
core: delay handling of link-changed platform event in manager
Backtrace:
NetworkManager[10972]: <debug> [1435142179.593334] [platform/nm-platform.c:2962] log_ip4_route(): signal: route 4 removed: 0.0.0.0/0 via 192.168.100.1 dev 85 metric 300 mss 0 src user scope global
NetworkManager[10972]: <debug> [1435142179.593421] [platform/nm-platform.c:2944] log_link(): signal: link removed: 85: bond0 <DOWN;broadcast,multicast,master> mtu 1500 arp 1 bond* init addr 7A:AB:BE:0D:19:3D driver bond
NetworkManager[10972]: <debug> [1435142179.593446] [nm-manager.c:779] remove_device(): (bond0): removing device (allow_unmanage 1, managed 1)
NetworkManager[10972]: <debug> [1435142179.596995] [devices/nm-device.c:7232] nm_device_set_unmanaged(): [0x5555559d2a40] (bond0): now unmanaged
NetworkManager[10972]: (devices/nm-device.c:8040):_set_state_full: runtime check failed: (priv->in_state_changed == FALSE)
#0 0x00007ffff4a538c3 in g_logv () at /lib64/libglib-2.0.so.0
#1 0x00007ffff4a53a3f in g_log () at /lib64/libglib-2.0.so.0
#2 0x00007ffff4a53d56 in g_warn_message () at /lib64/libglib-2.0.so.0
#3 0x00005555555b9dca in _set_state_full (self=0x5555559d2a40, state=NM_DEVICE_STATE_UNMANAGED, reason=NM_DEVICE_STATE_REASON_REMOVED, quitting=0) at devices/nm-device.c:8040
#4 0x0000555555626d7b in remove_device (manager=0x5555559631e0, device=0x5555559d2a40, quitting=0, allow_unmanage=<optimized out>) at nm-manager.c:801
#5 0x00007ffff28b7dac in ffi_call_unix64 () at /lib64/libffi.so.6
#6 0x00007ffff28b76d5 in ffi_call () at /lib64/libffi.so.6
#7 0x00007ffff4d4a628 in g_cclosure_marshal_generic () at /lib64/libgobject-2.0.so.0
#8 0x00007ffff4d49de8 in g_closure_invoke () at /lib64/libgobject-2.0.so.0
#9 0x00007ffff4d5b70d in signal_emit_unlocked_R () at /lib64/libgobject-2.0.so.0
#10 0x00007ffff4d63471 in g_signal_emit_valist () at /lib64/libgobject-2.0.so.0
#11 0x00007ffff4d63c78 in g_signal_emit_by_name () at /lib64/libgobject-2.0.so.0
#12 0x00005555555ce4ea in do_emit_signal (platform=platform@entry=0x55555594c8b0, obj=0x555555a74c50, cache_op=NMP_CACHE_OPS_REMOVED, was_visible=<optimized out>, reason=reason@entry=
NM_PLATFORM_REASON_INTERNAL) at platform/nm-linux-platform.c:1425
#13 0x00005555555ce826 in cache_prune_candidates_prune (platform=platform@entry=0x55555594c8b0) at platform/nm-linux-platform.c:1704
#14 0x00005555555d32d3 in do_request_link (platform=platform@entry=0x55555594c8b0, ifindex=ifindex@entry=85, name=name@entry=0x0, handle_delayed_action=handle_delayed_action@entry=0)
at platform/nm-linux-platform.c:1951
#15 0x00005555555d356b in delayed_action_handle_all (ifindex=85, platform=0x55555594c8b0) at platform/nm-linux-platform.c:1491
#16 0x00005555555d356b in delayed_action_handle_all (platform=0x55555594c8b0) at platform/nm-linux-platform.c:1573
#17 0x00005555555d356b in delayed_action_handle_all (platform=platform@entry=0x55555594c8b0, read_netlink=read_netlink@entry=0) at platform/nm-linux-platform.c:1588
#18 0x00005555555d32e2 in do_request_link (platform=platform@entry=0x55555594c8b0, ifindex=ifindex@entry=7, name=name@entry=0x0, handle_delayed_action=handle_delayed_action@entry=1)
at platform/nm-linux-platform.c:1954
#19 0x00005555555d5177 in do_change_link (platform=platform@entry=0x55555594c8b0, nlo=nlo@entry=0x55555597f0f0, complete_from_cache=complete_from_cache@entry=1) at platform/nm-linux-platform.c:2753
#20 0x00005555555d56b4 in link_enslave (platform=0x55555594c8b0, master=0, slave=7) at platform/nm-linux-platform.c:3141
#21 0x00005555555976de in release_slave (device=0x5555559d2a40, slave=0x5555559c6be0, configure=<optimized out>) at devices/nm-device-bond.c:437
#22 0x00005555555b7bc3 in nm_device_release_one_slave (self=self@entry=0x5555559d2a40, slave=0x5555559c6be0, configure=configure@entry=1, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
at devices/nm-device.c:1049
#23 0x00005555555b7f0e in nm_device_master_release_slaves (self=self@entry=0x5555559d2a40) at devices/nm-device.c:1781
#24 0x00005555555b9592 in nm_device_cleanup (self=0x5555559d2a40, reason=<optimized out>, deconfigure=1) at devices/nm-device.c:7752
#25 0x00005555555ba161 in _set_state_full (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED, quitting=quitting@entry=0) at devices/nm-device.c:8128
#26 0x00005555555bb297 in nm_device_state_changed (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
at devices/nm-device.c:8319
#27 0x00005555555bd9a5 in queued_set_state (user_data=<optimized out>) at devices/nm-device.c:8343
#28 0x00007ffff4a4c79a in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#29 0x00007ffff4a4cae8 in g_main_context_iterate.isra.24 () at /lib64/libglib-2.0.so.0
#30 0x00007ffff4a4cdba in g_main_loop_run () at /lib64/libglib-2.0.so.0
#31 0x000055555559556f in main (argc=1, argv=0x7fffffffdb88) at main.c:518
2015-06-24 13:42:16 +02:00
|
|
|
typedef struct {
|
2017-09-29 15:04:53 +02:00
|
|
|
CList lst;
|
2015-06-24 13:42:16 +02:00
|
|
|
NMManager *self;
|
|
|
|
|
int ifindex;
|
2017-09-29 15:04:53 +02:00
|
|
|
guint idle_id;
|
2015-06-24 13:42:16 +02:00
|
|
|
} PlatformLinkCbData;
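(The struct above carries a deferred platform link event. A minimal,
illustrative sketch of queueing such an event to an idle callback follows;
the scheduling function is made up and is not the actual nm-manager code,
only PlatformLinkCbData, _platform_link_cb_idle() and g_idle_add() are taken
from the surroundings.)

static gboolean _platform_link_cb_idle (PlatformLinkCbData *data);   /* defined below */

static void
example_queue_link_changed (NMManager *self, int ifindex)
{
	PlatformLinkCbData *data;

	/* handle the event outside of the platform signal emission, to
	 * avoid re-entrant device state changes (see the backtrace above). */
	data = g_slice_new (PlatformLinkCbData);
	data->self = self;
	data->ifindex = ifindex;
	data->idle_id = g_idle_add ((GSourceFunc) _platform_link_cb_idle, data);
	/* additionally linking data->lst into a manager-owned CList would
	 * allow cancelling pending events when the manager is disposed. */
}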
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_platform_link_cb_idle (PlatformLinkCbData *data)
|
|
|
|
|
{
|
2017-09-29 15:04:53 +02:00
|
|
|
int ifindex = data->ifindex;
|
2015-06-24 13:42:16 +02:00
|
|
|
NMManager *self = data->self;
|
2017-09-29 15:04:53 +02:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2017-09-29 16:58:24 +02:00
|
|
|
const NMPlatformLink *plink;
|
core: delay handling of link-changed platform event in manager
Backtrace:
NetworkManager[10972]: <debug> [1435142179.593334] [platform/nm-platform.c:2962] log_ip4_route(): signal: route 4 removed: 0.0.0.0/0 via 192.168.100.1 dev 85 metric 300 mss 0 src user scope global
NetworkManager[10972]: <debug> [1435142179.593421] [platform/nm-platform.c:2944] log_link(): signal: link removed: 85: bond0 <DOWN;broadcast,multicast,master> mtu 1500 arp 1 bond* init addr 7A:AB:BE:0D:19:3D driver bond
NetworkManager[10972]: <debug> [1435142179.593446] [nm-manager.c:779] remove_device(): (bond0): removing device (allow_unmanage 1, managed 1)
NetworkManager[10972]: <debug> [1435142179.596995] [devices/nm-device.c:7232] nm_device_set_unmanaged(): [0x5555559d2a40] (bond0): now unmanaged
NetworkManager[10972]: (devices/nm-device.c:8040):_set_state_full: runtime check failed: (priv->in_state_changed == FALSE)
#0 0x00007ffff4a538c3 in g_logv () at /lib64/libglib-2.0.so.0
#1 0x00007ffff4a53a3f in g_log () at /lib64/libglib-2.0.so.0
#2 0x00007ffff4a53d56 in g_warn_message () at /lib64/libglib-2.0.so.0
#3 0x00005555555b9dca in _set_state_full (self=0x5555559d2a40, state=NM_DEVICE_STATE_UNMANAGED, reason=NM_DEVICE_STATE_REASON_REMOVED, quitting=0) at devices/nm-device.c:8040
#4 0x0000555555626d7b in remove_device (manager=0x5555559631e0, device=0x5555559d2a40, quitting=0, allow_unmanage=<optimized out>) at nm-manager.c:801
#5 0x00007ffff28b7dac in ffi_call_unix64 () at /lib64/libffi.so.6
#6 0x00007ffff28b76d5 in ffi_call () at /lib64/libffi.so.6
#7 0x00007ffff4d4a628 in g_cclosure_marshal_generic () at /lib64/libgobject-2.0.so.0
#8 0x00007ffff4d49de8 in g_closure_invoke () at /lib64/libgobject-2.0.so.0
#9 0x00007ffff4d5b70d in signal_emit_unlocked_R () at /lib64/libgobject-2.0.so.0
#10 0x00007ffff4d63471 in g_signal_emit_valist () at /lib64/libgobject-2.0.so.0
#11 0x00007ffff4d63c78 in g_signal_emit_by_name () at /lib64/libgobject-2.0.so.0
#12 0x00005555555ce4ea in do_emit_signal (platform=platform@entry=0x55555594c8b0, obj=0x555555a74c50, cache_op=NMP_CACHE_OPS_REMOVED, was_visible=<optimized out>, reason=reason@entry=
NM_PLATFORM_REASON_INTERNAL) at platform/nm-linux-platform.c:1425
#13 0x00005555555ce826 in cache_prune_candidates_prune (platform=platform@entry=0x55555594c8b0) at platform/nm-linux-platform.c:1704
#14 0x00005555555d32d3 in do_request_link (platform=platform@entry=0x55555594c8b0, ifindex=ifindex@entry=85, name=name@entry=0x0, handle_delayed_action=handle_delayed_action@entry=0)
at platform/nm-linux-platform.c:1951
#15 0x00005555555d356b in delayed_action_handle_all (ifindex=85, platform=0x55555594c8b0) at platform/nm-linux-platform.c:1491
#16 0x00005555555d356b in delayed_action_handle_all (platform=0x55555594c8b0) at platform/nm-linux-platform.c:1573
#17 0x00005555555d356b in delayed_action_handle_all (platform=platform@entry=0x55555594c8b0, read_netlink=read_netlink@entry=0) at platform/nm-linux-platform.c:1588
#18 0x00005555555d32e2 in do_request_link (platform=platform@entry=0x55555594c8b0, ifindex=ifindex@entry=7, name=name@entry=0x0, handle_delayed_action=handle_delayed_action@entry=1)
at platform/nm-linux-platform.c:1954
#19 0x00005555555d5177 in do_change_link (platform=platform@entry=0x55555594c8b0, nlo=nlo@entry=0x55555597f0f0, complete_from_cache=complete_from_cache@entry=1) at platform/nm-linux-platform.c:2753
#20 0x00005555555d56b4 in link_enslave (platform=0x55555594c8b0, master=0, slave=7) at platform/nm-linux-platform.c:3141
#21 0x00005555555976de in release_slave (device=0x5555559d2a40, slave=0x5555559c6be0, configure=<optimized out>) at devices/nm-device-bond.c:437
#22 0x00005555555b7bc3 in nm_device_release_one_slave (self=self@entry=0x5555559d2a40, slave=0x5555559c6be0, configure=configure@entry=1, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
at devices/nm-device.c:1049
#23 0x00005555555b7f0e in nm_device_master_release_slaves (self=self@entry=0x5555559d2a40) at devices/nm-device.c:1781
#24 0x00005555555b9592 in nm_device_cleanup (self=0x5555559d2a40, reason=<optimized out>, deconfigure=1) at devices/nm-device.c:7752
#25 0x00005555555ba161 in _set_state_full (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED, quitting=quitting@entry=0) at devices/nm-device.c:8128
#26 0x00005555555bb297 in nm_device_state_changed (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
at devices/nm-device.c:8319
#27 0x00005555555bd9a5 in queued_set_state (user_data=<optimized out>) at devices/nm-device.c:8343
#28 0x00007ffff4a4c79a in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#29 0x00007ffff4a4cae8 in g_main_context_iterate.isra.24 () at /lib64/libglib-2.0.so.0
#30 0x00007ffff4a4cdba in g_main_loop_run () at /lib64/libglib-2.0.so.0
#31 0x000055555559556f in main (argc=1, argv=0x7fffffffdb88) at main.c:518
2015-06-24 13:42:16 +02:00
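The backtrace above shows why this commit defers the work: remove_device()/_set_state_full() (frames #3-#4) is invoked from inside the platform's own signal emission (frames #5-#12), which was itself triggered while a device state change was still in progress (frames #25-#26), tripping the in_state_changed runtime check. The fix is to let the platform signal handler only record the event and schedule an idle callback, so the real handling runs later from the main loop. Below is a minimal, self-contained sketch of that deferral pattern; the names (LinkEvent, link_event_idle, on_platform_link_changed) are hypothetical simplifications, whereas the real code uses PlatformLinkCbData and a c_list instead of a GList.

/* Sketch only: hypothetical names, not the actual NMManager types. */
#include <glib.h>

typedef struct {
    GList **pending;   /* list of outstanding events, owned by the caller */
    int     ifindex;
    guint   idle_id;
} LinkEvent;

static gboolean
link_event_idle (gpointer user_data)
{
    LinkEvent *ev = user_data;

    /* Safe point: we are back in the main loop, outside the platform's
     * signal emission, so devices may be added or removed freely here. */
    g_print ("handle link change for ifindex %d\n", ev->ifindex);

    *ev->pending = g_list_remove (*ev->pending, ev);
    g_slice_free (LinkEvent, ev);
    return G_SOURCE_REMOVE;
}

static void
on_platform_link_changed (int ifindex, GList **pending)
{
    LinkEvent *ev = g_slice_new (LinkEvent);

    /* Record the event and defer; do not touch devices from the signal. */
    ev->pending = pending;
    ev->ifindex = ifindex;
    ev->idle_id = g_idle_add (link_event_idle, ev);
    *pending = g_list_prepend (*pending, ev);
}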
|
|
|
|
2017-11-28 11:22:01 +01:00
|
|
|
c_list_unlink_stale (&data->lst);
|
2017-09-29 15:04:53 +02:00
|
|
|
g_slice_free (PlatformLinkCbData, data);
|
2015-06-24 13:42:16 +02:00
|
|
|
|
2017-09-29 16:58:24 +02:00
|
|
|
plink = nm_platform_link_get (priv->platform, ifindex);
|
|
|
|
|
if (plink) {
|
2017-10-05 14:42:02 +02:00
|
|
|
const NMPObject *plink_keep_alive = nmp_object_ref (NMP_OBJECT_UP_CAST (plink));
|
2015-12-04 14:11:00 +01:00
|
|
|
|
2017-09-29 16:58:24 +02:00
|
|
|
platform_link_added (self, ifindex, plink, FALSE, NULL);
|
2017-10-05 14:42:02 +02:00
|
|
|
nmp_object_unref (plink_keep_alive);
|
2015-12-04 14:11:00 +01:00
|
|
|
} else {
|
|
|
|
|
NMDevice *device;
|
|
|
|
|
GError *error = NULL;
|
|
|
|
|
|
2017-09-29 15:04:53 +02:00
|
|
|
device = nm_manager_get_device_by_ifindex (self, ifindex);
|
2015-12-04 14:11:00 +01:00
|
|
|
if (device) {
|
2016-01-14 17:24:25 +01:00
|
|
|
if (nm_device_is_software (device)) {
|
2017-04-13 18:28:08 +02:00
|
|
|
nm_device_sys_iface_state_set (device, NM_DEVICE_SYS_IFACE_STATE_REMOVED);
|
2015-12-09 15:41:39 +01:00
|
|
|
/* Our software devices stick around until their connection is removed */
|
2015-12-04 14:11:00 +01:00
|
|
|
if (!nm_device_unrealize (device, FALSE, &error)) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG2W (LOGD_DEVICE, device, "failed to unrealize: %s", error->message);
|
2015-12-04 14:11:00 +01:00
|
|
|
g_clear_error (&error);
|
2014-09-24 16:58:07 -05:00
|
|
|
remove_device (self, device, FALSE, TRUE);
|
2017-08-16 15:44:24 +02:00
|
|
|
} else {
|
|
|
|
|
nm_device_update_from_platform_link (device, NULL);
|
2014-09-24 15:13:19 -05:00
|
|
|
}
|
2015-12-04 14:11:00 +01:00
|
|
|
} else {
|
2015-12-09 15:41:39 +01:00
|
|
|
/* Hardware and external devices always get removed when their kernel link is gone */
|
2015-12-04 14:11:00 +01:00
|
|
|
remove_device (self, device, FALSE, TRUE);
|
2014-09-24 15:13:19 -05:00
|
|
|
}
|
2015-06-24 13:42:16 +02:00
|
|
|
}
|
|
|
|
|
}
|
2015-12-04 14:11:00 +01:00
|
|
|
|
2015-06-24 13:42:16 +02:00
|
|
|
return G_SOURCE_REMOVE;
|
|
|
|
|
}
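One detail of the idle handler above is the plink_keep_alive reference: the cached platform link is ref'd before platform_link_added() is called and unref'd afterwards, because the callee can trigger cache pruning that would otherwise free the object mid-call. A generic sketch of that keep-alive pattern follows, using plain GObject and an invented callee name purely for illustration.

#include <glib-object.h>

/* Hypothetical stand-in for platform_link_added(): a call that may,
 * directly or indirectly, make the cache drop its own reference. */
static void
callee_that_may_prune_cache (GObject *obj)
{
    (void) obj;
}

static void
use_cached_object (GObject *cached)
{
    /* Take our own reference first so the object outlives any cache prune
     * triggered by the callee; drop it once we are done. */
    GObject *keep_alive = g_object_ref (cached);

    callee_that_may_prune_cache (keep_alive);

    g_object_unref (keep_alive);
}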
|
|
|
|
|
|
2008-04-29 Dan Williams <dcbw@redhat.com>
Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.
* marshallers/nm-marshal.list
- Add marshaller
* src/NetworkManager.c
- (main): let the NMManager handle the NMHalManager
* src/nm-hal-manager.c
src/nm-hal-manager.h
- convert to a GObject, and emit signals when stuff changes. Let the
NMManager handle the signals, instead of the NMHalManager calling
into the NMManager.
* src/nm-manager.c
src/nm-manager.h
- (remove_one_device): consolidate device removals here
- (dispose): use remove_one_device()
- (nm_manager_get_device_by_udi): make static
- (deferred_hal_manager_query_devices): idle handler to query the HAL
manager for devices at startup or wakeup time
- (nm_manager_new): create and monitor the HAL manager
- (hal_manager_udi_added_cb): new function; do what
nm_manager_add_device() used to do when signalled by the hal manager
- (hal_manager_udi_removed_cb): new function; do what
nm_manager_remove_device() used to do when signalled by the hal
manager
- (hal_manager_rfkill_changed_cb): handle rfkill changes from the
hal manager
- (hal_manager_hal_reappeared_cb): when HAL comes back, remove devices
in our device list that aren't known to HAL
- (impl_manager_sleep): on wakeup, re-add devices from an idle handler;
see comments on nm-hal-manager.c::nm_manager_state_changed() a few
commits ago
- (nm_manager_get_device_by_path, nm_manager_is_udi_managed,
nm_manager_activation_pending, nm_manager_wireless_enabled,
nm_manager_wireless_hardware_enabled,
nm_manager_set_wireless_hardware_enabled): remove, unused
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3619 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-29 23:03:00 +00:00
|
|
|
static void
|
2014-03-07 19:04:38 +01:00
|
|
|
platform_link_cb (NMPlatform *platform,
|
2016-10-22 13:08:36 +02:00
|
|
|
int obj_type_i,
|
2014-03-07 19:04:38 +01:00
|
|
|
int ifindex,
|
|
|
|
|
NMPlatformLink *plink,
|
2016-10-22 13:08:36 +02:00
|
|
|
int change_type_i,
|
2014-03-07 19:04:38 +01:00
|
|
|
gpointer user_data)
|
2007-02-08 15:34:26 +00:00
|
|
|
{
|
2017-09-29 15:04:53 +02:00
|
|
|
NMManager *self;
|
|
|
|
|
NMManagerPrivate *priv;
|
2016-10-22 13:08:36 +02:00
|
|
|
const NMPlatformSignalChangeType change_type = change_type_i;
|
2015-06-24 13:42:16 +02:00
|
|
|
PlatformLinkCbData *data;
|
|
|
|
|
|
2014-03-07 19:04:38 +01:00
|
|
|
switch (change_type) {
|
|
|
|
|
case NM_PLATFORM_SIGNAL_ADDED:
|
2015-06-24 13:42:16 +02:00
|
|
|
case NM_PLATFORM_SIGNAL_REMOVED:
|
2017-09-29 15:04:53 +02:00
|
|
|
self = NM_MANAGER (user_data);
|
|
|
|
|
priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
2015-06-24 13:42:16 +02:00
|
|
|
data = g_slice_new (PlatformLinkCbData);
|
2017-09-29 15:04:53 +02:00
|
|
|
data->self = self;
|
2015-06-24 13:42:16 +02:00
|
|
|
data->ifindex = ifindex;
|
2017-09-29 15:04:53 +02:00
|
|
|
c_list_link_tail (&priv->link_cb_lst, &data->lst);
|
|
|
|
|
data->idle_id = g_idle_add ((GSourceFunc) _platform_link_cb_idle, data);
|
2014-03-07 19:04:38 +01:00
|
|
|
break;
|
2015-06-24 13:42:16 +02:00
|
|
|
default:
|
2014-03-07 19:04:38 +01:00
|
|
|
break;
|
2015-06-24 13:42:16 +02:00
|
|
|
}
|
2007-02-08 15:34:26 +00:00
|
|
|
}
|
|
|
|
|
|
2015-05-04 16:54:51 +02:00
|
|
|
static void
|
|
|
|
|
platform_query_devices (NMManager *self)
|
|
|
|
|
{
|
2017-09-29 15:11:33 +02:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2017-07-04 22:29:27 +02:00
|
|
|
gs_unref_ptrarray GPtrArray *links = NULL;
|
2015-05-04 16:54:51 +02:00
|
|
|
int i;
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take over. Mostly for restart of NetworkManager,
we activate a connection gracefully without going through a down-up
cycle. After the device reaches activated state, the device is
considered fully managed. For this only an existing, non volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non volatile, connection which
matches the device's configuration.
This patch lets NetworkManager assume connections also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
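The commit message above distinguishes external devices (a generated, volatile in-memory connection; the device is never touched) from assumed devices (seamless take-over of an existing, permanent connection), and extends assuming to the very first start when a matching permanent connection exists. A rough sketch of that decision follows; the helper name and parameters are invented for illustration and are not the actual NetworkManager API.

#include <glib.h>

/* Hypothetical sketch of the assume-vs-external decision. */
static gboolean
should_assume (gboolean first_start,
               gboolean state_file_names_connection,
               gboolean has_matching_permanent_connection)
{
    /* b) seamless take-over: the state file already names a connection for
     *    this interface, or this is the first start (no state file yet) and
     *    an existing permanent connection matches the device's config. */
    if (state_file_names_connection)
        return TRUE;
    if (first_start && has_matching_permanent_connection)
        return TRUE;

    /* a) otherwise the interface is treated as external: a volatile
     *    in-memory connection is generated and the device is not touched. */
    return FALSE;
}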
|
|
|
gboolean guess_assume;
|
2017-10-24 08:35:42 +02:00
|
|
|
gs_free char *order = NULL;
|
2017-04-19 16:16:12 +02:00
|
|
|
|
|
|
|
|
guess_assume = nm_config_get_first_start (nm_config_get ());
|
2017-10-24 08:35:42 +02:00
|
|
|
order = nm_config_data_get_value (NM_CONFIG_GET_DATA,
|
|
|
|
|
NM_CONFIG_KEYFILE_GROUP_MAIN,
|
|
|
|
|
NM_CONFIG_KEYFILE_KEY_MAIN_SLAVES_ORDER,
|
|
|
|
|
NM_CONFIG_GET_VALUE_STRIP);
|
2017-09-29 15:11:33 +02:00
|
|
|
links = nm_platform_link_get_all (priv->platform, !nm_streq0 (order, "index"));
|
2017-07-04 22:29:27 +02:00
|
|
|
if (!links)
|
|
|
|
|
return;
|
|
|
|
|
for (i = 0; i < links->len; i++) {
|
2017-07-05 11:12:59 +02:00
|
|
|
const NMPlatformLink *link = NMP_OBJECT_CAST_LINK (links->pdata[i]);
|
2017-12-06 13:09:31 +01:00
|
|
|
const NMConfigDeviceStateData *dev_state;
|
2016-09-23 17:36:21 +02:00
|
|
|
|
2017-12-06 13:09:31 +01:00
|
|
|
dev_state = nm_config_device_state_get (priv->config, link->ifindex);
|
2016-09-23 17:36:21 +02:00
|
|
|
platform_link_added (self,
|
2017-07-04 22:29:27 +02:00
|
|
|
link->ifindex,
|
|
|
|
|
link,
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take over. Mostly for restart of NetworkManager,
we activate a connection gracefully without going through an down-up
cycle. After the device reaches activated state, the device is
considered fully managed. For this only an existing, non volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non volatile, connection which
matches the device's configuration.
This patch lets NetworkManager assume connection also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
|
|
|
guess_assume && (!dev_state || !dev_state->connection_uuid),
|
2016-09-23 17:36:21 +02:00
|
|
|
dev_state);
|
|
|
|
|
}
|
2015-05-04 16:54:51 +02:00
|
|
|
}
2008-04-29  Dan Williams  <dcbw@redhat.com>

Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.

* marshallers/nm-marshal.list
  - Add marshaller
* src/NetworkManager.c
  - (main): let the NMManager handle the NMHalManager
* src/nm-hal-manager.c
  src/nm-hal-manager.h
  - convert to a GObject, and emit signals when stuff changes. Let the
    NMManager handle the signals, instead of the NMHalManager calling
    into the NMManager.
* src/nm-manager.c
  src/nm-manager.h
  - (remove_one_device): consolidate device removals here
  - (dispose): use remove_one_device()
  - (nm_manager_get_device_by_udi): make static
  - (deferred_hal_manager_query_devices): idle handler to query the HAL
    manager for devices at startup or wakeup time
  - (nm_manager_new): create and monitor the HAL manager
  - (hal_manager_udi_added_cb): new function; do what
    nm_manager_add_device() used to do when signalled by the HAL manager
  - (hal_manager_udi_removed_cb): new function; do what
    nm_manager_remove_device() used to do when signalled by the HAL
    manager
  - (hal_manager_rfkill_changed_cb): handle rfkill changes from the
    HAL manager
  - (hal_manager_hal_reappeared_cb): when HAL comes back, remove devices
    in our device list that aren't known to HAL
  - (impl_manager_sleep): on wakeup, re-add devices from an idle handler;
    see comments on nm-hal-manager.c::nm_manager_state_changed() a few
    commits ago
  - (nm_manager_get_device_by_path, nm_manager_is_udi_managed,
    nm_manager_activation_pending, nm_manager_wireless_enabled,
    nm_manager_wireless_hardware_enabled,
    nm_manager_set_wireless_hardware_enabled): remove, unused

git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3619 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
static void
rfkill_manager_rfkill_changed_cb (NMRfkillManager *rfkill_mgr,
                                  RfKillType rtype,
                                  RfKillState udev_state,
                                  gpointer user_data)
{
	nm_manager_rfkill_update (NM_MANAGER (user_data), rtype);
}
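For context, a minimal sketch (not taken from the original file) of how such a callback is typically wired up. The "rfkill-changed" signal name and the setup helper are assumptions made for illustration only:

/* Hypothetical wiring, assuming NMRfkillManager emits a "rfkill-changed"
 * signal whose arguments match the callback signature above. */
static void
manager_setup_rfkill (NMManager *self, NMRfkillManager *rfkill_mgr)
{
	g_signal_connect (rfkill_mgr,
	                  "rfkill-changed",
	                  G_CALLBACK (rfkill_manager_rfkill_changed_cb),
	                  self);
}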
core: track devices in manager via embedded CList

Instead of using a GSList for tracking the devices, use a CList.
A CList is in most cases the more suitable data structure than GSList:

- you can find out in O(1) whether the object is linked. That is nice,
  for example, to assert in NMDevice's destructor that the object was
  unlinked, and we will use that later in nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the element
  without having access to the link's head.
- contrary to GSList, this does not require an extra slice allocation
  for the link node. It quite possibly consumes slightly less memory
  because the CList structure is embedded in a struct that we already
  allocate. Even if slice allocation were perfect and consumed only
  2*sizeof(gpointer) per link node, it would at most be as good as
  CList. Quite possibly, there is an overhead though.
- CList possibly has better memory locality, because the link structure
  and the data are close to each other.

Something which could be seen as a disadvantage is that with CList one
device can only be tracked in one NMManager instance at a time. But that
is fine. There exists only one NMManager instance for now, and even if we
ever introduced multiple managers, we probably would not associate one
NMDevice instance with multiple managers.

The advantages are arguably not huge, but CList is clearly the more
suited data structure. No need to stick to a suboptimal data structure
for the job. Refactor it.
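To illustrate the point of the commit message above, here is a small self-contained sketch of an embedded ("intrusive") doubly-linked list. It is a simplified model, not the real CList API: the node lives inside the tracked struct, unlinking needs no head pointer, and "is it linked?" is an O(1) check.

#include <stdio.h>

/* Simplified model of an embedded list node (the real CList works similarly). */
typedef struct Node Node;
struct Node {
	Node *prev;
	Node *next;
};

typedef struct {
	const char *iface;
	Node        lst;   /* embedded node: no separate link allocation needed */
} Device;

static void node_init (Node *n)            { n->prev = n->next = n; }
static int  node_is_linked (const Node *n) { return n->next != n; }  /* O(1) */

static void
node_link_tail (Node *head, Node *n)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void
node_unlink (Node *n)   /* O(1); no access to the list head required */
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	node_init (n);
}

int
main (void)
{
	Node head;
	Device d = { .iface = "eth0" };

	node_init (&head);
	node_init (&d.lst);
	node_link_tail (&head, &d.lst);
	printf ("linked: %d\n", node_is_linked (&d.lst));   /* prints 1 */
	node_unlink (&d.lst);
	printf ("linked: %d\n", node_is_linked (&d.lst));   /* prints 0 */
	return 0;
}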
const CList *
nm_manager_get_devices (NMManager *manager)
{
	g_return_val_if_fail (NM_IS_MANAGER (manager), NULL);

	return &NM_MANAGER_GET_PRIVATE (manager)->devices_lst_head;
}
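As a usage note (not from the original source), a hypothetical caller inside the daemon could walk the returned list with c_list_for_each_entry(), using the same devices_lst member that nm-manager.c iterates over elsewhere; no copy of the list is made.

/* Sketch only: assumes the daemon-internal headers are available and that the
 * devices_lst CList member of NMDevice is visible to core callers, as it is
 * within this file. */
static void
dump_device_ifaces (NMManager *manager)
{
	NMDevice *device;

	c_list_for_each_entry (device, nm_manager_get_devices (manager), devices_lst)
		g_print ("device: %s\n", nm_device_get_iface (device));
}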
core: improve selection of device when activating profile on any device

With

  $ nmcli connection up "$PROFILE" ifname "$DEVICE"

it's clear that the user means the particular device. That is also taken
as an indication to make $DEVICE managed, in case it was unmanaged before.
So, this command implies a previous

  $ nmcli device set $DEVICE managed yes

On the other hand, if the user just issues

  $ nmcli connection up "$PROFILE"

without a particular device, then we should prefer devices which are
marked as managed over unmanaged ones.

Likewise, we should consider the device's state when selecting a device.
This means that when activating a profile which is activatable on multiple
devices, it will now prefer devices which are not already active. The
exception to this is that if the profile itself is already active (and
multi-connect "single"), then it will prefer to re-activate the profile on
the same device. This was done previously already. What's new is that if
the profile is not multi-connect "single", the said exception no longer
applies, and we prefer to activate the profile on a hitherto unactivated
device.

https://bugzilla.redhat.com/show_bug.cgi?id=1639254
https://github.com/NetworkManager/NetworkManager/pull/232
typedef enum {
	DEVICE_ACTIVATION_PRIO_NONE,
	DEVICE_ACTIVATION_PRIO_UNMANAGED,
	DEVICE_ACTIVATION_PRIO_UNAVAILABLE,
	DEVICE_ACTIVATION_PRIO_DEACTIVATING,
	DEVICE_ACTIVATION_PRIO_ACTIVATING,
	DEVICE_ACTIVATION_PRIO_ACTIVATED,
	DEVICE_ACTIVATION_PRIO_DISCONNECTED,

	_DEVICE_ACTIVATION_PRIO_BEST = DEVICE_ACTIVATION_PRIO_DISCONNECTED,
} DeviceActivationPrio;

static DeviceActivationPrio
_device_get_activation_prio (NMDevice *device)
{
	if (!nm_device_get_managed (device, TRUE))
		return DEVICE_ACTIVATION_PRIO_NONE;

	switch (nm_device_get_state (device)) {
	case NM_DEVICE_STATE_DISCONNECTED:
		return DEVICE_ACTIVATION_PRIO_DISCONNECTED;
	case NM_DEVICE_STATE_ACTIVATED:
		return DEVICE_ACTIVATION_PRIO_ACTIVATED;
	case NM_DEVICE_STATE_PREPARE:
	case NM_DEVICE_STATE_CONFIG:
	case NM_DEVICE_STATE_NEED_AUTH:
	case NM_DEVICE_STATE_IP_CONFIG:
	case NM_DEVICE_STATE_IP_CHECK:
	case NM_DEVICE_STATE_SECONDARIES:
		return DEVICE_ACTIVATION_PRIO_ACTIVATING;
	case NM_DEVICE_STATE_DEACTIVATING:
	case NM_DEVICE_STATE_FAILED:
		return DEVICE_ACTIVATION_PRIO_DEACTIVATING;
	case NM_DEVICE_STATE_UNAVAILABLE:
		return DEVICE_ACTIVATION_PRIO_UNAVAILABLE;
	case NM_DEVICE_STATE_UNKNOWN:
	case NM_DEVICE_STATE_UNMANAGED:
		return DEVICE_ACTIVATION_PRIO_UNMANAGED;
	}

	g_return_val_if_reached (DEVICE_ACTIVATION_PRIO_UNAVAILABLE);
}
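The ordering of the enum above is what encodes the preference described in the commit message: a managed but disconnected device ranks highest, unmanaged ones lowest. A hypothetical helper (not present in the original file) makes the comparison explicit:

/* Illustrative only: with the enum ordering above, the device in the
 * "better" activation state wins; ties keep the first argument. */
static NMDevice *
_pick_preferred_device (NMDevice *a, NMDevice *b)
{
	return _device_get_activation_prio (a) >= _device_get_activation_prio (b) ? a : b;
}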
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection

NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).

NMSettingsConnection does a lot of things already:

1) it "is-a" NMDBusObject and exports the API of a connection profile
   on D-Bus
2) it interacts with NMSettings and contains functionality for tracking
   the profiles.
3) it is the base class of types like NMSKeyfileConnection and
   NMIfcfgConnection. These handle how the profile is persisted on disk.
4) it implements the NMConnection interface, to itself track the settings
   of the profile.

3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an
NMSimpleConnection instance, to which it delegates keeping the actual
profile.

Advantages:

- by delegating, there is a clearer separation of what
  NMSettingsConnection does. For example, in C we often required casts
  from NMSettingsConnection to NMConnection. NMConnection is a very
  trivial object with very little logic. When we have an NMConnection
  instance at hand, it's good to know that it is *only* that simple,
  instead of also being an entire NMSettingsConnection instance.
  The main purpose of this patch is to simplify the code by separating
  the NMConnection from the NMSettingsConnection. We should generally be
  aware whether we handle an NMSettingsConnection or a trivial
  NMConnection instance. Now, because NMSettingsConnection no longer
  "is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface, and we create
  NMSimpleConnection instances whenever we need a real instance. In GLib,
  interfaces have a performance overhead that we needlessly pay all the
  time. With this change, we no longer require NMConnection to be an
  interface. Thus, in the future we could compile a version of libnm-core
  for the daemon where NMConnection is not an interface but a GObject
  implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as
  immutable and copy-on-write.
  For example, when NMDevice needs a snapshot of the activated profile as
  applied-connection, all it can do is clone the entire
  NMSettingsConnection as an NMSimpleConnection. Likewise, when we get an
  NMConnection instance and want to keep a reference to it, we cannot do
  that, because we never know who else references and modifies the
  instance. By separating NMSettingsConnection we could in the future
  make NMConnection immutable and copy-on-write, to avoid all unnecessary
  clones.

static NMDevice *
nm_manager_get_best_device_for_connection (NMManager *self,
                                           NMSettingsConnection *sett_conn,
                                           NMConnection *connection,
                                           gboolean for_user_request,
                                           GHashTable *unavailable_devices,
                                           GError **error)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMActiveConnectionState ac_state;
	NMActiveConnection *ac;
	NMDevice *ac_device;
	NMDevice *device;
	struct {
		NMDevice *device;
		DeviceActivationPrio prio;
	} best = {
		.device = NULL,
		.prio = DEVICE_ACTIVATION_PRIO_NONE,
	};
	NMDeviceCheckConAvailableFlags flags;
	gs_unref_ptrarray GPtrArray *all_ac_arr = NULL;
	gs_free_error GError *local_best = NULL;
	NMConnectionMultiConnect multi_connect;

	nm_assert (!sett_conn || NM_IS_SETTINGS_CONNECTION (sett_conn));
	nm_assert (!connection || NM_IS_CONNECTION (connection));
	nm_assert (sett_conn || connection);
	nm_assert (!connection || !sett_conn || connection == nm_settings_connection_get_connection (sett_conn));

	if (!connection)
		connection = nm_settings_connection_get_connection (sett_conn);

	multi_connect = _nm_connection_get_multi_connect (connection);
core: ignore unmanaged devices for explicit activation request depending on multi-connect

When a device is unmanaged, an explicit activation request can still
activate it. In particular, that is the case for

  $ nmcli connection up "$PROFILE" ifname "$DEVICE"

It is also the case for plain

  $ nmcli connection up "$PROFILE"

where NetworkManager searches for a suitable device -- depending on the
multi-connect setting of the profile.

The idea is that a profile with "multi-connect=single" is expected to
sufficiently and uniquely match a device, based on matching properties
like "connection.interface-name". In that case, an explicit activation
request from the user shows the intent to manage the device. Note that
it's hard to know whether the profile really uniquely selects a
particular device. For example, if the profile doesn't specify
"connection.interface-name", it might still uniquely identify an ethernet
device, if you only have one such device.

On the other hand, with "connection.multi-connect" other than "single",
it is very much expected that the profile does not strictly match one
device.

Change the behavior here for multi-connect profiles. This allows the user
to block individual devices from activation via

  $ nmcli device set "$DEVICE" managed no

A subsequent

  $ nmcli connection up "$MULTI_PROFILE"

will not consider "$DEVICE" as a suitable candidate for activation.

Likewise, in the future we may want to add a

  $ nmcli connection up --all "$MULTI_PROFILE"

command, to activate the profile on all suitable devices. In that case
again, unmanaged devices should probably also be skipped for
multi-connect profiles.

https://bugzilla.redhat.com/show_bug.cgi?id=1639254
	if (!for_user_request)
		flags = NM_DEVICE_CHECK_CON_AVAILABLE_NONE;
	else {
		/* If the profile is multi-connect=single, we also consider devices which
		 * are marked as unmanaged. An explicit user request shows sufficient user
		 * intent to make the device managed.
		 * That is also because we expect that such a profile is suitably tied
		 * to the intended device. So when an unmanaged device matches, the user's
		 * intent is clear.
		 *
		 * For multi-connect != single profiles that is different. The profile
		 * is not restricted to a particular device.
		 * For that reason, plain `nmcli connection up "$MULTI_PROFILE"` seems
		 * less suitable for multi-connect profiles, because the target device is
		 * left unspecified. Anyway, if a user issues
		 *
		 *   $ nmcli device set "$DEVICE" managed no
		 *   $ nmcli connection up "$MULTI_PROFILE"
		 *
		 * then it is reasonable for multi-connect profiles to not consider
		 * the device a suitable candidate.
		 *
		 * This may seem inconsistent, but I think that it makes a lot of
		 * sense. Also note that "connection.multi-connect" works quite differently
		 * in aspects like activation. E.g. `nmcli connection up` of a multi-connect
		 * "single" profile will deactivate the profile if it is active already.
		 * That is different from multi-connect profiles, where it will aim to
		 * activate the profile one more time on a hitherto disconnected device. */
		if (multi_connect == NM_CONNECTION_MULTI_CONNECT_SINGLE)
			flags = NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST;
		else
			flags = NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST & ~_NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST_OVERRULE_UNMANAGED;
	}
	if (   multi_connect == NM_CONNECTION_MULTI_CONNECT_SINGLE
	    && (ac = active_connection_find_by_connection (self, sett_conn, connection, NM_ACTIVE_CONNECTION_STATE_DEACTIVATING, &all_ac_arr))) {
		/* If we have a profile which may activate on only one device (multi-connect single), then
		 * we prefer the device on which the profile is already active. That means reactivating
		 * the profile on the same device.
		 *
		 * If the profile can be activated on multiple devices, we don't do this. In fact, the
		 * check below for the DeviceActivationPrio will prefer devices which are not already
		 * activated (with this or another profile). */
		ac_device = nm_active_connection_get_device (ac);
		if (   ac_device
		    && (   (unavailable_devices && g_hash_table_contains (unavailable_devices, ac_device))
		        || !nm_device_check_connection_available (ac_device, connection, flags, NULL, NULL)))
			ac_device = NULL;

		if (all_ac_arr) {
			guint i;

			ac_state = nm_active_connection_get_state (ac);

			/* we found several active connections. See which one is the most suitable... */
			nm_assert (ac == all_ac_arr->pdata[0]);
			for (i = 1; i < all_ac_arr->len; i++) {
				NMActiveConnection *ac2 = all_ac_arr->pdata[i];
				NMDevice *ac_device2 = nm_active_connection_get_device (ac2);
				NMActiveConnectionState ac_state2;

				if (   !ac_device2
				    || (unavailable_devices && g_hash_table_contains (unavailable_devices, ac_device2))
				    || !nm_device_check_connection_available (ac_device2, connection, flags, NULL, NULL))
					continue;

				ac_state2 = nm_active_connection_get_state (ac2);

				if (!ac_device)
					goto found_better;

				if (ac_state == ac_state2) {
					/* active-connections are in their list in the order in which they are connected.
					 * If we have two with the same state, the later (newer) one is preferred. */
					goto found_better;
				}

				switch (ac_state) {
				case NM_ACTIVE_CONNECTION_STATE_UNKNOWN:
					if (NM_IN_SET (ac_state2, NM_ACTIVE_CONNECTION_STATE_ACTIVATING, NM_ACTIVE_CONNECTION_STATE_ACTIVATED, NM_ACTIVE_CONNECTION_STATE_DEACTIVATING))
						goto found_better;
					break;
				case NM_ACTIVE_CONNECTION_STATE_ACTIVATING:
					if (NM_IN_SET (ac_state2, NM_ACTIVE_CONNECTION_STATE_ACTIVATED))
						goto found_better;
					break;
				case NM_ACTIVE_CONNECTION_STATE_ACTIVATED:
					break;
				case NM_ACTIVE_CONNECTION_STATE_DEACTIVATING:
					if (NM_IN_SET (ac_state2, NM_ACTIVE_CONNECTION_STATE_ACTIVATING, NM_ACTIVE_CONNECTION_STATE_ACTIVATED))
						goto found_better;
					break;
				default:
					nm_assert_not_reached ();
					goto found_better;
				}

				continue;
found_better:
				ac = ac2;
				ac_state = ac_state2;
				ac_device = ac_device2;
			}
		}

		if (ac_device)
			return ac_device;
	}
	/* Pick the first device that's compatible with the connection. */
	c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
		GError *local = NULL;
		DeviceActivationPrio prio;

		if (   unavailable_devices
		    && g_hash_table_contains (unavailable_devices, device))
			continue;

		/* Determine the priority of this device. Currently this priority is independent
		 * of the profile (connection) and the device's details (aside from the state).
		 *
		 * Maybe nm_device_check_connection_available() should instead return a priority,
		 * as it has more information available.
		 *
		 * For example, if you have multiple Wi-Fi devices, currently a user-request would
		 * also select the device if the AP is not visible. Optimally, if one of the two
		 * devices sees the AP and the other one doesn't, the former would be preferred.
		 * For that, the priority would need to be determined by nm_device_check_connection_available(). */
		prio = _device_get_activation_prio (device);
		if (   prio <= best.prio
		    && best.device) {
			/* We already have a matching device with a better priority. This candidate
			 * cannot be better. Skip the check.
			 *
			 * Also note that below we collect the best error message @local_best.
			 * Since we already have best.device, the error message does not matter
			 * either, and we can skip nm_device_check_connection_available() altogether. */
			continue;
		}

		if (nm_device_check_connection_available (device,
		                                          connection,
		                                          flags,
		                                          NULL,
		                                          error ? &local : NULL)) {
			if (prio == _DEVICE_ACTIVATION_PRIO_BEST) {
				/* This device already has the best priority. It cannot get any
				 * better, so finish the search. */
				return device;
			}
			best.prio = prio;
			best.device = device;
			continue;
		}

		if (error) {
			gboolean reset_error;

			if (!local_best)
				reset_error = TRUE;
			else if (local_best->domain != NM_UTILS_ERROR)
				reset_error = (local->domain == NM_UTILS_ERROR);
			else {
				reset_error = (   local->domain == NM_UTILS_ERROR
				               && local_best->code < local->code);
			}

			if (reset_error) {
				g_clear_error (&local_best);
				g_set_error (&local_best,
				             local->domain,
				             local->code,
				             "device %s not available because %s",
				             nm_device_get_iface (device),
				             local->message);
			}
			g_error_free (local);
		}
	}
	if (best.device)
		return best.device;

	if (error) {
		if (local_best)
			g_propagate_error (error, g_steal_pointer (&local_best));
		else {
			nm_utils_error_set_literal (error,
			                            NM_UTILS_ERROR_UNKNOWN,
			                            "no suitable device found");
		}
	}
	return NULL;
}
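For orientation, a hypothetical caller (not part of the original file) showing how the parameters are meant to be used: passing NULL for the connection lets the function derive it from the settings connection, and the error is only filled in when no device qualifies.

/* Sketch only; nm_manager_get_best_device_for_connection() is static, so a
 * real caller would live in this same file. The helper name is made up. */
static NMDevice *
_find_device_for_user_activation (NMManager *self, NMSettingsConnection *sett_conn)
{
	gs_free_error GError *error = NULL;
	NMDevice *device;

	device = nm_manager_get_best_device_for_connection (self,
	                                                    sett_conn,
	                                                    NULL,   /* derive NMConnection from sett_conn */
	                                                    TRUE,   /* explicit user request */
	                                                    NULL,   /* no devices excluded */
	                                                    &error);
	if (!device)
		g_warning ("no device for activation: %s", error->message);
	return device;
}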
|
|
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
static const char **
|
|
|
|
|
_get_devices_paths (NMManager *self,
|
|
|
|
|
gboolean all_devices)
|
2007-08-26 15:55:27 +00:00
|
|
|
{
|
2015-04-15 14:53:30 -04:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
const char **paths = NULL;
|
2015-08-18 13:42:20 +02:00
|
|
|
guint i;
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *device;
|
2007-08-26 15:55:27 +00:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
paths = g_new (const char *, c_list_length (&priv->devices_lst_head) + 1);
|
2015-08-18 13:42:20 +02:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
i = 0;
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2015-08-18 13:42:20 +02:00
|
|
|
const char *path;
path = nm_dbus_object_get_path (NM_DBUS_OBJECT (device));
if (!path)
continue;
if (   !all_devices
    && !nm_device_is_real (device))
continue;
paths[i++] = path;
}
paths[i++] = NULL;
return paths;
}
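
The loop above walks the manager's devices through CList links embedded in each NMDevice, the pattern the commit message above describes. For illustration only, here is a minimal, self-contained sketch of that embedded-list pattern; it assumes the c-list.h header from the bundled c-util library (wrapped by nm-utils/nm-c-list.h in this tree), and the Tracker/Item types are purely hypothetical, not NetworkManager code.

#include <stdio.h>
#include "c-list.h"   /* assumption: the bundled c-util header is on the include path */

/* Illustrative types only: the CList node is embedded directly in the tracked
 * object, so linking needs no extra per-node allocation. */
typedef struct {
	CList items_lst_head;   /* list head, embedded in the "manager" */
} Tracker;

typedef struct {
	CList items_lst;        /* link node, embedded in the tracked object */
	int   id;
} Item;

int
main (void)
{
	Tracker tracker;
	Item a = { .id = 1 }, b = { .id = 2 };
	Item *it;

	c_list_init (&tracker.items_lst_head);
	c_list_init (&a.items_lst);
	c_list_init (&b.items_lst);

	/* linking is O(1) and allocation-free */
	c_list_link_tail (&tracker.items_lst_head, &a.items_lst);
	c_list_link_tail (&tracker.items_lst_head, &b.items_lst);

	/* iterate the embedded links, recovering the containing struct */
	c_list_for_each_entry (it, &tracker.items_lst_head, items_lst)
		printf ("item %d\n", it->id);

	printf ("tracked: %zu\n", c_list_length (&tracker.items_lst_head));

	/* unlinking is O(1) and does not need access to the head */
	c_list_unlink (&a.items_lst);

	return 0;
}
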
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to a misbehavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
  - 2809360 bytes
  + 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
  during testing. In other words, the numbers are not reproducible.
  Currently, the implementation performs no caching of GVariants at all,
  but it would be rather simple to add it, if that turns out to be
  useful.
  Anyway, without strong claim, it seems that the new form tends to
  perform slightly better. That would be no surprise.
  $ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
  - real 1m39.355s
  + real 1m37.432s
  $ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
  - real 0m26.843s
  + real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
  conditions, doesn't give a large difference. On my system they
  consume about 19MB RSS. It seems that the new version has a
  slightly smaller RSS size.
  - 19356 RSS
  + 18660 RSS
2018-02-26 13:51:52 +01:00
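
As an illustration of the lower-layer API the message above refers to, the following sketch registers a single method handler directly on a GDBusConnection with g_dbus_connection_register_object() and a GDBusInterfaceVTable. It is a generic GIO example, not NetworkManager's actual glue: NMDBusObject/NMDBusManager use hand-written static GDBusInterfaceInfo descriptors rather than parsing XML, and the com.example names below are hypothetical.

#include <gio/gio.h>

/* Hypothetical interface with one GetDevices-like method returning "ao". */
static const char introspection_xml[] =
	"<node>"
	"  <interface name='com.example.Manager'>"
	"    <method name='GetDevices'>"
	"      <arg type='ao' name='devices' direction='out'/>"
	"    </method>"
	"  </interface>"
	"</node>";

static void
handle_method_call (GDBusConnection       *connection,
                    const char            *sender,
                    const char            *object_path,
                    const char            *interface_name,
                    const char            *method_name,
                    GVariant              *parameters,
                    GDBusMethodInvocation *invocation,
                    gpointer               user_data)
{
	if (g_strcmp0 (method_name, "GetDevices") == 0) {
		/* reply with a fixed, made-up object path array */
		const char *paths[] = { "/com/example/Devices/0", NULL };

		g_dbus_method_invocation_return_value (invocation,
		                                       g_variant_new ("(^ao)", (char **) paths));
	}
}

static const GDBusInterfaceVTable vtable = {
	.method_call = handle_method_call,
};

static GDBusNodeInfo *introspection_data;

/* Call once the bus connection is available, e.g. from a GBusAcquiredCallback. */
static guint
register_example_object (GDBusConnection *connection)
{
	/* parse the XML once; keep the node info alive as long as the registration */
	if (!introspection_data)
		introspection_data = g_dbus_node_info_new_for_xml (introspection_xml, NULL);

	return g_dbus_connection_register_object (connection,
	                                          "/com/example/Manager",
	                                          introspection_data->interfaces[0],
	                                          &vtable,
	                                          NULL, NULL, NULL);
}
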
static void
impl_manager_get_devices (NMDBusObject *obj,
const NMDBusInterfaceInfoExtended *interface_info,
const NMDBusMethodInfoExtended *method_info,
GDBusConnection *connection,
const char *sender,
GDBusMethodInvocation *invocation,
GVariant *parameters)
{
NMManager *self = NM_MANAGER (obj);
gs_free const char **paths = NULL;
paths = _get_devices_paths (self, FALSE);
g_dbus_method_invocation_return_value (invocation,
g_variant_new ("(^ao)", (char **) paths));
}
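
For completeness, a handler like the one above can be exercised from a stand-alone client using the same low-level GIO calls. The sketch below synchronously calls GetDevices on org.freedesktop.NetworkManager over the system bus and unpacks the "(ao)" reply into a string array; it is an illustrative client program, not part of nm-manager.c.

#include <gio/gio.h>
#include <stdio.h>

int
main (void)
{
	GDBusConnection *connection;
	GVariant *ret;
	char **paths;
	GError *error = NULL;
	guint i;

	connection = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, &error);
	if (!connection) {
		g_printerr ("could not connect to the system bus: %s\n", error->message);
		g_error_free (error);
		return 1;
	}

	ret = g_dbus_connection_call_sync (connection,
	                                   "org.freedesktop.NetworkManager",
	                                   "/org/freedesktop/NetworkManager",
	                                   "org.freedesktop.NetworkManager",
	                                   "GetDevices",
	                                   NULL,
	                                   G_VARIANT_TYPE ("(ao)"),
	                                   G_DBUS_CALL_FLAGS_NONE,
	                                   -1,
	                                   NULL,
	                                   &error);
	if (!ret) {
		g_printerr ("GetDevices failed: %s\n", error->message);
		g_error_free (error);
		g_object_unref (connection);
		return 1;
	}

	/* "^ao" copies the object paths into a NULL-terminated string array */
	g_variant_get (ret, "(^ao)", &paths);
	for (i = 0; paths[i]; i++)
		printf ("%s\n", paths[i]);

	g_strfreev (paths);
	g_variant_unref (ret);
	g_object_unref (connection);
	return 0;
}
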
static void
impl_manager_get_all_devices (NMDBusObject *obj,
const NMDBusInterfaceInfoExtended *interface_info,
const NMDBusMethodInfoExtended *method_info,
GDBusConnection *connection,
const char *sender,
GDBusMethodInvocation *invocation,
GVariant *parameters)
{
NMManager *self = NM_MANAGER (obj);
gs_free const char **paths = NULL;
paths = _get_devices_paths (self, TRUE);
g_dbus_method_invocation_return_value (invocation,
g_variant_new ("(^ao)", (char **) paths));
}
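
The rework message above also points out that PropertiesChanged is now emitted per individual property change, and that batching has to be requested explicitly with g_object_freeze_notify()/g_object_thaw_notify(). A minimal sketch of that batching pattern on a plain GObject follows; the "first-name"/"last-name" property names are hypothetical.

#include <glib-object.h>

/* Batch several property changes so the queued "notify" emissions (and, with
 * the new D-Bus glue, the resulting PropertiesChanged signals) go out together
 * when the freeze count drops back to zero. */
static void
update_properties (GObject *obj)
{
	g_object_freeze_notify (obj);

	/* hypothetical properties; each set would normally notify right away */
	g_object_set (obj, "first-name", "Jane", NULL);
	g_object_set (obj, "last-name", "Doe", NULL);

	/* queued (and de-duplicated) notifications are emitted here, in one go */
	g_object_thaw_notify (obj);
}
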
static void
impl_manager_get_device_by_ip_iface (NMDBusObject *obj,
const NMDBusInterfaceInfoExtended *interface_info,
const NMDBusMethodInfoExtended *method_info,
GDBusConnection *connection,
const char *sender,
GDBusMethodInvocation *invocation,
GVariant *parameters)
{
NMManager *self = NM_MANAGER (obj);
NMDevice *device;
const char *path = NULL;
const char *iface;
g_variant_get (parameters, "(&s)", &iface);
device = find_device_by_ip_iface (self, iface);
if (device)
path = nm_dbus_object_get_path (NM_DBUS_OBJECT (device));
2018-02-26 13:51:52 +01:00
|
|
|
if (!path) {
|
|
|
|
|
g_dbus_method_invocation_return_error (invocation,
|
2015-04-15 14:53:30 -04:00
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"No device found for the requested iface.");
|
2018-02-26 13:51:52 +01:00
|
|
|
return;
|
2011-04-22 12:27:55 -05:00
|
|
|
}
|
2018-02-26 13:51:52 +01:00
|
|
|
|
|
|
|
|
g_dbus_method_invocation_return_value (invocation,
|
|
|
|
|
g_variant_new ("(o)", path));
|
2011-04-22 12:27:55 -05:00
|
|
|
}
|
|
|
|
|
|
2014-01-29 13:22:53 -05:00
|
|
|
static gboolean
|
|
|
|
|
is_compatible_with_slave (NMConnection *master, NMConnection *slave)
|
|
|
|
|
{
|
|
|
|
|
NMSettingConnection *s_con;
|
|
|
|
|
|
|
|
|
|
g_return_val_if_fail (master, FALSE);
|
|
|
|
|
g_return_val_if_fail (slave, FALSE);
|
|
|
|
|
|
|
|
|
|
s_con = nm_connection_get_setting_connection (slave);
|
|
|
|
|
g_assert (s_con);
|
|
|
|
|
|
|
|
|
|
return nm_connection_is_type (master, nm_setting_connection_get_slave_type (s_con));
|
|
|
|
|
}
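A hypothetical, self-contained illustration of the check above using public
libnm-core API (the connection contents are made up; assumes libnm-core
headers and a surrounding function body): a slave is compatible when its
slave-type names the master's connection type.
    NMConnection *master = nm_simple_connection_new ();
    NMConnection *slave = nm_simple_connection_new ();
    NMSetting *s_con;

    /* a bond master profile ... */
    s_con = nm_setting_connection_new ();
    g_object_set (s_con,
                  NM_SETTING_CONNECTION_ID, "bond0-master",
                  NM_SETTING_CONNECTION_TYPE, NM_SETTING_BOND_SETTING_NAME,
                  NM_SETTING_CONNECTION_INTERFACE_NAME, "bond0",
                  NULL);
    nm_connection_add_setting (master, s_con);

    /* ... and a wired slave profile whose slave-type is "bond" */
    s_con = nm_setting_connection_new ();
    g_object_set (s_con,
                  NM_SETTING_CONNECTION_ID, "eth0-slave",
                  NM_SETTING_CONNECTION_TYPE, NM_SETTING_WIRED_SETTING_NAME,
                  NM_SETTING_CONNECTION_MASTER, "bond0",
                  NM_SETTING_CONNECTION_SLAVE_TYPE, NM_SETTING_BOND_SETTING_NAME,
                  NULL);
    nm_connection_add_setting (slave, s_con);

    /* equivalent to is_compatible_with_slave (master, slave) returning TRUE */
    g_assert (nm_connection_is_type (master,
                                     nm_setting_connection_get_slave_type (
                                         nm_connection_get_setting_connection (slave))));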
|
|
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
/**
|
|
|
|
|
* find_master:
|
|
|
|
|
* @self: #NMManager object
|
|
|
|
|
* @connection: the #NMConnection to find the master connection and device for
|
|
|
|
|
* @device: the #NMDevice, if any, which will activate @connection
|
|
|
|
|
* @out_master_connection: on success, the master connection of @connection if
|
|
|
|
|
* that master connection was found
|
|
|
|
|
* @out_master_device: on success, the master device of @connection if that
|
|
|
|
|
* master device was found
|
2014-01-28 17:24:26 -05:00
|
|
|
* @out_master_ac: on success, the master ActiveConnection of @connection if
|
|
|
|
|
* there already is one
|
2014-01-29 11:48:03 -05:00
|
|
|
* @error: the error, if an error occurred
|
2012-02-26 17:27:42 -06:00
|
|
|
*
|
2014-01-29 13:22:53 -05:00
|
|
|
* Given an #NMConnection, attempts to find its master. If @connection has
|
|
|
|
|
* no master, this will return %TRUE and @out_master_connection and
|
|
|
|
|
* @out_master_device will be untouched.
|
|
|
|
|
*
|
|
|
|
|
* If @connection does have a master, then the outputs depend on what is in its
|
|
|
|
|
* #NMSettingConnection:master property:
|
|
|
|
|
*
|
2014-01-28 17:24:26 -05:00
|
|
|
* If "master" is the ifname of an existing #NMDevice, and that device has a
|
|
|
|
|
* compatible master connection activated or activating on it, then
|
|
|
|
|
* @out_master_device, @out_master_connection, and @out_master_ac will all be
|
|
|
|
|
* set. If the device exists and is idle, only @out_master_device will be set.
|
|
|
|
|
* If the device exists and has an incompatible connection on it, an error
|
|
|
|
|
* will be returned.
|
2014-01-29 13:22:53 -05:00
|
|
|
*
|
|
|
|
|
* If "master" is the ifname of a non-existent device, then @out_master_device
|
|
|
|
|
* will be %NULL, and @out_master_connection will be a connection whose
|
2014-01-28 17:24:26 -05:00
|
|
|
* activation would cause the creation of that device. @out_master_ac MAY be
|
|
|
|
|
* set in this case as well (if the connection has started activating, but has
|
|
|
|
|
* not yet created its device).
|
2014-01-29 13:22:53 -05:00
|
|
|
*
|
2014-01-29 13:22:53 -05:00
|
|
|
* If "master" is the UUID of a compatible master connection, then
|
|
|
|
|
* @out_master_connection will be the identified connection, and @out_master_device
|
2014-01-28 17:24:26 -05:00
|
|
|
* and/or @out_master_ac will be set if the connection is currently activating.
|
|
|
|
|
* (@out_master_device will not be set if the device exists but does not have
|
|
|
|
|
* @out_master_connection active/activating on it.)
|
2012-02-26 17:27:42 -06:00
|
|
|
*
|
|
|
|
|
* Returns: %TRUE if the master device and/or connection could be found or if
|
|
|
|
|
* the connection did not require a master, %FALSE otherwise
|
|
|
|
|
**/
|
|
|
|
|
static gboolean
|
|
|
|
|
find_master (NMManager *self,
|
|
|
|
|
NMConnection *connection,
|
|
|
|
|
NMDevice *device,
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection **out_master_connection,
|
2014-01-29 11:48:03 -05:00
|
|
|
NMDevice **out_master_device,
|
2014-01-28 17:24:26 -05:00
|
|
|
NMActiveConnection **out_master_ac,
|
2014-01-29 11:48:03 -05:00
|
|
|
GError **error)
|
2012-02-26 17:27:42 -06:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMSettingConnection *s_con;
|
|
|
|
|
const char *master;
|
|
|
|
|
NMDevice *master_device = NULL;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
an NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get an NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
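A short usage sketch of the delegation described above (the variable name
"sett_conn" is hypothetical): callers first fetch the delegated NMConnection
and only then use NMConnection API on it, the same pattern visible in the
code below.
    /* sett_conn is a hypothetical NMSettingsConnection pointer */
    NMConnection *connection = nm_settings_connection_get_connection (sett_conn);
    const char *uuid = nm_connection_get_uuid (connection);
    const char *id   = nm_connection_get_id (connection);
    /* previously, the NMSettingsConnection itself could be passed wherever an
     * NMConnection was expected; now the explicit delegation call is needed. */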
|
|
|
NMSettingsConnection *master_connection;
|
2012-02-26 17:27:42 -06:00
|
|
|
|
|
|
|
|
s_con = nm_connection_get_setting_connection (connection);
|
|
|
|
|
g_assert (s_con);
|
|
|
|
|
master = nm_setting_connection_get_master (s_con);
|
|
|
|
|
|
|
|
|
|
if (master == NULL)
|
|
|
|
|
return TRUE; /* success, but no master */
|
|
|
|
|
|
|
|
|
|
/* Try as an interface name first */
|
2014-10-15 21:17:45 -05:00
|
|
|
master_device = find_device_by_iface (self, master, NULL, connection);
|
2012-02-26 17:27:42 -06:00
|
|
|
if (master_device) {
|
2014-01-29 11:48:03 -05:00
|
|
|
if (master_device == device) {
|
|
|
|
|
g_set_error_literal (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
|
|
|
|
"Device cannot be its own master");
|
2012-02-26 17:27:42 -06:00
|
|
|
return FALSE;
|
2014-01-29 11:48:03 -05:00
|
|
|
}
|
2014-01-29 13:22:53 -05:00
|
|
|
|
2015-07-14 16:53:24 +02:00
|
|
|
master_connection = nm_device_get_settings_connection (master_device);
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
if ( master_connection
|
|
|
|
|
&& !is_compatible_with_slave (nm_settings_connection_get_connection (master_connection),
|
|
|
|
|
connection)) {
|
2014-01-29 13:22:53 -05:00
|
|
|
g_set_error (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
2016-03-30 09:00:06 +02:00
|
|
|
"The active connection on %s is not compatible",
|
|
|
|
|
nm_device_get_iface (master_device));
|
2014-01-29 13:22:53 -05:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
2012-02-26 17:27:42 -06:00
|
|
|
} else {
|
|
|
|
|
/* Try master as a connection UUID */
|
2015-07-14 16:53:24 +02:00
|
|
|
master_connection = nm_settings_get_connection_by_uuid (priv->settings, master);
|
2012-02-26 17:27:42 -06:00
|
|
|
if (master_connection) {
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and consumed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something that could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
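A minimal, self-contained sketch of the embedded-CList pattern described
above (type and function names are made up; the real tracking lives in
NMManager and NMDevice):
    #include <string.h>
    #include "c-list.h"

    typedef struct {
        CList devices_lst;      /* link node embedded in the tracked struct */
        const char *iface;
    } Device;

    static CList devices_lst_head = C_LIST_INIT (devices_lst_head);

    static void
    track_device (Device *device)
    {
        /* no extra allocation for the link node; it is part of Device */
        c_list_link_tail (&devices_lst_head, &device->devices_lst);
    }

    static void
    untrack_device (Device *device)
    {
        /* O(1), and no access to the list head is required */
        if (c_list_is_linked (&device->devices_lst))
            c_list_unlink (&device->devices_lst);
    }

    static Device *
    lookup_device_by_iface (const char *iface)
    {
        Device *candidate;

        c_list_for_each_entry (candidate, &devices_lst_head, devices_lst) {
            if (strcmp (candidate->iface, iface) == 0)
                return candidate;
        }
        return NULL;
    }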
|
|
|
NMDevice *candidate;
|
2012-02-26 17:27:42 -06:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
/* Check if the master connection is activated on some device already */
|
|
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2012-02-26 17:27:42 -06:00
|
|
|
if (candidate == device)
|
|
|
|
|
continue;
|
|
|
|
|
|
2015-07-14 16:53:24 +02:00
|
|
|
if (nm_device_get_settings_connection (candidate) == master_connection) {
|
2012-02-26 17:27:42 -06:00
|
|
|
master_device = candidate;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (out_master_connection)
|
|
|
|
|
*out_master_connection = master_connection;
|
|
|
|
|
if (out_master_device)
|
|
|
|
|
*out_master_device = master_device;
|
2017-03-13 14:01:47 +01:00
|
|
|
if (out_master_ac && master_connection) {
|
2018-04-19 15:27:54 +02:00
|
|
|
*out_master_ac = active_connection_find (self, master_connection, NULL,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_DEACTIVATING,
|
|
|
|
|
NULL);
|
2017-03-13 14:01:47 +01:00
|
|
|
}
|
2012-02-26 17:27:42 -06:00
|
|
|
|
2014-01-29 11:48:03 -05:00
|
|
|
if (master_device || master_connection)
|
|
|
|
|
return TRUE;
|
|
|
|
|
else {
|
|
|
|
|
g_set_error_literal (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"Master connection not found or invalid");
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2012-02-26 17:27:42 -06:00
|
|
|
}
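A hypothetical caller sketch for the function above, showing how the
out-parameters are typically consumed (every name except find_master()
is assumed for illustration):
    NMSettingsConnection *master_connection = NULL;
    NMDevice *master_device = NULL;
    NMActiveConnection *master_ac = NULL;
    GError *error = NULL;

    if (!find_master (self, connection, device,
                      &master_connection, &master_device, &master_ac,
                      &error)) {
        /* "master" was set in the profile but could not be resolved */
        g_warning ("cannot resolve master: %s", error->message);
        g_clear_error (&error);
        return;
    }

    if (!master_connection && !master_device) {
        /* the profile requires no master; activate it directly */
    }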
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* ensure_master_active_connection:
|
|
|
|
|
* @self: the #NMManager
|
2013-07-29 13:11:47 -05:00
|
|
|
* @subject: the #NMAuthSubject representing the requestor of this activation
|
2012-02-26 17:27:42 -06:00
|
|
|
* @connection: the connection that should depend on @master_connection
|
|
|
|
|
* @device: the #NMDevice, if any, which will activate @connection
|
2014-01-29 13:22:53 -05:00
|
|
|
* @master_connection: the master connection, or %NULL
|
|
|
|
|
* @master_device: the master device, or %NULL
|
2018-03-28 17:18:04 +02:00
|
|
|
* @activation_reason: the reason for activation
|
2012-02-26 17:27:42 -06:00
|
|
|
* @error: the error, if an error occurred
|
|
|
|
|
*
|
|
|
|
|
* Determines whether a given #NMConnection depends on another connection to
|
|
|
|
|
* be activated, and if so, finds that master connection or creates it.
|
|
|
|
|
*
|
2014-01-29 13:22:53 -05:00
|
|
|
* If @master_device and @master_connection are both set then @master_connection
|
2014-01-29 13:22:53 -05:00
|
|
|
* MUST already be activated or activating on @master_device, and the function will
|
|
|
|
|
* return the existing #NMActiveConnection.
|
2014-01-29 13:22:53 -05:00
|
|
|
*
|
2014-01-29 13:22:53 -05:00
|
|
|
* If only @master_device is set, and it has an #NMActiveConnection, then the
|
|
|
|
|
* function will return it if it is a compatible master, or an error if not. If it
|
|
|
|
|
* doesn't have an AC, then the function will create one if a compatible master
|
|
|
|
|
* connection exists, or return an error if not.
|
2014-01-29 13:22:53 -05:00
|
|
|
*
|
|
|
|
|
* If only @master_connection is set, then this will try to find or create a compatible
|
|
|
|
|
* #NMDevice, and either activate @master_connection on that device or return an error.
|
|
|
|
|
*
|
2012-02-26 17:27:42 -06:00
|
|
|
* Returns: the master #NMActiveConnection that the caller should depend on, or
|
|
|
|
|
* %NULL if an error occurred
|
|
|
|
|
*/
|
|
|
|
|
static NMActiveConnection *
|
|
|
|
|
ensure_master_active_connection (NMManager *self,
|
2013-07-29 13:11:47 -05:00
|
|
|
NMAuthSubject *subject,
|
2012-02-26 17:27:42 -06:00
|
|
|
NMConnection *connection,
|
|
|
|
|
NMDevice *device,
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *master_connection,
|
2012-02-26 17:27:42 -06:00
|
|
|
NMDevice *master_device,
|
2018-03-28 17:18:04 +02:00
|
|
|
NMActivationReason activation_reason,
|
2012-02-26 17:27:42 -06:00
|
|
|
GError **error)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMActiveConnection *master_ac = NULL;
|
|
|
|
|
NMDeviceState master_state;
|
|
|
|
|
|
|
|
|
|
g_assert (connection);
|
|
|
|
|
g_assert (master_connection || master_device);
|
|
|
|
|
|
|
|
|
|
/* If the master device isn't activated then we need to activate it using
|
|
|
|
|
* a compatible connection. If it's already activating we can just proceed.
|
|
|
|
|
*/
|
|
|
|
|
if (master_device) {
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *device_connection = nm_device_get_settings_connection (master_device);
|
2014-01-29 13:22:53 -05:00
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
/* If we're passed a connection and a device, we require that connection
|
|
|
|
|
* already be activated on the device, e.g. as returned from find_master().
|
|
|
|
|
*/
|
core: fix activation of slave when master is not active, but device exists
NM fails to activate a slave if the master device already exists
but has no active connection.
One way to reproduce, create a bond master/slave configuration and
ensure that the master device exists (e.g. by activating the bond, and
killing NM without taking down the device, or externally via `ip link add`).
If you try to activate the slave it will fail with the following message
(in nmcli):
"Error: Connection activation failed: The active connection on MASTER is not a valid master for 'SLAVE'"
although MASTER is not active.
This also triggers the following assertion:
#0 0x0000003370c504e9 in g_logv () from /lib64/libglib-2.0.so.0
#1 0x0000003370c5063f in g_log () from /lib64/libglib-2.0.so.0
#2 0x000000000047646a in is_compatible_with_slave (master=0x0, slave=slave@entry=0xc4aa60) at nm-manager.c:2193
#3 0x000000000047e289 in ensure_master_active_connection (self=self@entry=0xc8d150, subject=0x7f23b80059e0, connection=connection@entry=0xc4aa60, device=device@entry=0xcac380, master_connection=master_connection@entry=0x0,
master_device=master_device@entry=0xc9e800, error=error@entry=0x7fffa5cc4958) at nm-manager.c:2395
#4 0x000000000047eb4a in _internal_activate_device (self=self@entry=0xc8d150, active=active@entry=0xcc33b0, error=error@entry=0x7fffa5cc4958) at nm-manager.c:2665
#5 0x000000000047ecf2 in _internal_activate_generic (self=self@entry=0xc8d150, active=active@entry=0xcc33b0, error=error@entry=0x7fffa5cc4958) at nm-manager.c:2712
#6 0x000000000047ef2b in _internal_activation_auth_done (active=0xcc33b0, success=<optimized out>, error_desc=0x0, user_data1=0xc8d150, user_data2=<optimized out>) at nm-manager.c:2848
#7 0x0000000000466fa1 in auth_done (chain=0xcef020, error=0x0, unused=<optimized out>, user_data=<optimized out>) at nm-active-connection.c:603
#8 0x00000000004753da in auth_chain_finish (user_data=0xcef020) at nm-manager-auth.c:88
#9 0x0000003370c492a6 in g_main_context_dispatch () from /lib64/libglib-2.0.so.0
#10 0x0000003370c49628 in g_main_context_iterate.isra () from /lib64/libglib-2.0.so.0
#11 0x0000003370c49a3a in g_main_loop_run () from /lib64/libglib-2.0.so.0
#12 0x0000000000429e65 in main (argc=1, argv=0x7fffa5cc4e48) at main.c:678
Signed-off-by: Thomas Haller <thaller@redhat.com>
2014-06-11 20:35:08 +02:00
|
|
|
g_assert (!master_connection || master_connection == device_connection);
|
|
|
|
if ( device_connection
|
|
|
|
|
&& !is_compatible_with_slave (nm_settings_connection_get_connection (device_connection),
|
|
|
|
|
connection)) {
|
2014-01-29 13:22:53 -05:00
|
|
|
g_set_error (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
2016-03-30 09:00:06 +02:00
|
|
|
"The active connection %s is not compatible",
|
2014-01-29 13:22:53 -05:00
|
|
|
nm_connection_get_id (connection));
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2012-02-26 17:27:42 -06:00
|
|
|
|
|
|
|
|
master_state = nm_device_get_state (master_device);
|
|
|
|
|
if ( (master_state == NM_DEVICE_STATE_ACTIVATED)
|
|
|
|
|
|| nm_device_is_activating (master_device)) {
|
|
|
|
|
/* Device already using master_connection */
|
2014-06-11 20:35:08 +02:00
|
|
|
g_assert (device_connection);
|
2012-02-26 17:27:42 -06:00
|
|
|
return NM_ACTIVE_CONNECTION (nm_device_get_act_request (master_device));
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-29 13:22:53 -05:00
|
|
|
/* If the device is disconnected, find a compatible connection and
|
2012-02-26 17:27:42 -06:00
|
|
|
* activate it on the device.
|
|
|
|
|
*/
|
2014-09-24 16:58:07 -05:00
|
|
|
if (master_state == NM_DEVICE_STATE_DISCONNECTED || !nm_device_is_real (master_device)) {
|
2017-02-03 15:13:03 +01:00
|
|
|
gs_free NMSettingsConnection **connections = NULL;
|
|
|
|
|
guint i;
|
2012-02-26 17:27:42 -06:00
|
|
|
|
|
|
|
|
g_assert (master_connection == NULL);
|
|
|
|
|
|
|
|
|
|
/* Find a compatible connection and activate this device using it */
|
2018-06-27 14:20:57 +02:00
|
|
|
connections = nm_manager_get_activatable_connections (self, FALSE, TRUE, NULL);
|
2017-02-03 15:13:03 +01:00
|
|
|
for (i = 0; connections[i]; i++) {
|
|
|
|
|
NMSettingsConnection *candidate = connections[i];
|
2018-08-11 11:08:17 +02:00
|
|
|
NMConnection *cand_conn = nm_settings_connection_get_connection (candidate);
|
2012-02-26 17:27:42 -06:00
|
|
|
|
2013-07-25 15:36:45 +02:00
|
|
|
/* Ensure that, e.g. for a bond/team slave, the candidate master is a
|
|
|
|
|
* bond/team master
|
|
|
|
|
*/
|
2018-08-11 11:08:17 +02:00
|
|
|
if (!is_compatible_with_slave (cand_conn, connection))
|
2012-02-26 17:27:42 -06:00
|
|
|
continue;
|
|
|
|
|
|
2018-06-27 16:21:43 +02:00
|
|
|
if (nm_device_check_connection_available (master_device,
|
2018-08-11 11:08:17 +02:00
|
|
|
cand_conn,
|
2018-06-27 16:21:43 +02:00
|
|
|
NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL)) {
|
2012-02-26 17:27:42 -06:00
|
|
|
master_ac = nm_manager_activate_connection (self,
|
|
|
|
|
candidate,
|
|
|
|
|
NULL,
|
2016-09-07 17:47:26 +02:00
|
|
|
NULL,
|
2012-09-13 15:57:36 -05:00
|
|
|
master_device,
|
2013-07-29 13:11:47 -05:00
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
2018-03-28 17:18:04 +02:00
|
|
|
activation_reason,
|
2012-02-26 17:27:42 -06:00
|
|
|
error);
|
|
|
|
|
return master_ac;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
g_set_error (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
|
2016-03-30 09:00:06 +02:00
|
|
|
"No compatible connection found.");
|
2012-02-26 17:27:42 -06:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Otherwise, the device is unmanaged, unavailable, or disconnecting */
|
|
|
|
|
g_set_error (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
2014-10-15 15:27:25 -04:00
|
|
|
NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
2016-03-30 09:00:06 +02:00
|
|
|
"Device unmanaged or not available for activation");
|
2012-02-26 17:27:42 -06:00
|
|
|
} else if (master_connection) {
|
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *candidate;
|
2012-02-26 17:27:42 -06:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
/* Find a compatible device and activate it using this connection */
|
|
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2012-02-26 17:27:42 -06:00
|
|
|
if (candidate == device) {
|
|
|
|
|
/* A device obviously can't be its own master */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-27 16:21:43 +02:00
|
|
|
if (!nm_device_check_connection_available (candidate,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_connection (master_connection),
|
2018-06-27 16:21:43 +02:00
|
|
|
NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL))
|
2012-02-26 17:27:42 -06:00
|
|
|
continue;
|
|
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
if (!nm_device_is_software (candidate)) {
|
|
|
|
|
master_state = nm_device_get_state (candidate);
|
|
|
|
|
if (nm_device_is_real (candidate) && master_state != NM_DEVICE_STATE_DISCONNECTED)
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2012-02-26 17:27:42 -06:00
|
|
|
|
2013-01-30 12:52:42 -06:00
|
|
|
master_ac = nm_manager_activate_connection (self,
|
|
|
|
|
master_connection,
|
|
|
|
|
NULL,
|
2016-09-07 17:47:26 +02:00
|
|
|
NULL,
|
2012-09-13 15:57:36 -05:00
|
|
|
candidate,
|
2013-07-29 13:11:47 -05:00
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
2018-03-28 17:18:04 +02:00
|
|
|
activation_reason,
|
2013-01-30 12:52:42 -06:00
|
|
|
error);
|
|
|
|
|
return master_ac;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
g_set_error (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
2016-03-30 09:00:06 +02:00
|
|
|
"No device available");
|
2012-02-26 17:27:42 -06:00
|
|
|
} else
|
|
|
|
|
g_assert_not_reached ();
|
|
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
}
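The "settings: use delegation instead of inheritance" commit message above
boils down to a simple "has-a" relation. A rough sketch (simplified; the real
NMSettingsConnection is a GObject and the names below are invented):

typedef struct {
	/* ... D-Bus export state, on-disk persistence, NMSettings tracking ... */
	NMConnection *connection;   /* the profile data, an NMSimpleConnection */
} ExampleSettingsConnection;

static NMConnection *
example_settings_connection_get_connection (ExampleSettingsConnection *self)
{
	/* callers that only need the profile data work on the plain
	 * NMConnection; no cast from the settings object is involved */
	return self->connection;
}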
|
|
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
typedef struct {
|
|
|
|
|
NMSettingsConnection *connection;
|
|
|
|
|
NMDevice *device;
|
|
|
|
|
} SlaveConnectionInfo;
|
|
|
|
|
|
2015-05-06 14:38:55 +02:00
|
|
|
/**
|
|
|
|
|
* find_slaves:
|
|
|
|
|
* @manager: #NMManager object
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
* @sett_conn: the master #NMSettingsConnection to find slave connections for
|
|
|
|
|
* @device: the master #NMDevice for the @sett_conn
|
2017-02-24 09:32:05 +01:00
|
|
|
* @out_n_slaves: on return, the number of slaves found
|
2015-05-06 14:38:55 +02:00
|
|
|
*
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
* Given an #NMSettingsConnection, attempts to find its slaves. If @sett_conn is not
|
2015-05-06 14:38:55 +02:00
|
|
|
* a master, or does not have any slaves, this will return %NULL.
|
|
|
|
|
*
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
* Returns: an array of #SlaveConnectionInfo for given master @sett_conn, or %NULL
|
2015-05-06 14:38:55 +02:00
|
|
|
**/
|
2017-02-24 09:32:05 +01:00
|
|
|
static SlaveConnectionInfo *
|
2015-05-06 14:38:55 +02:00
|
|
|
find_slaves (NMManager *manager,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2017-02-24 09:32:05 +01:00
|
|
|
NMDevice *device,
|
|
|
|
|
guint *out_n_slaves)
|
2015-05-06 14:38:55 +02:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (manager);
|
2017-02-03 14:15:16 +01:00
|
|
|
gs_free NMSettingsConnection **all_connections = NULL;
|
2017-02-24 09:32:05 +01:00
|
|
|
guint n_all_connections;
|
2017-02-03 14:15:16 +01:00
|
|
|
guint i;
|
2017-02-24 09:32:05 +01:00
|
|
|
SlaveConnectionInfo *slaves = NULL;
|
|
|
|
|
guint n_slaves = 0;
|
2015-05-06 14:38:55 +02:00
|
|
|
NMSettingConnection *s_con;
|
2017-02-24 09:32:05 +01:00
|
|
|
gs_unref_hashtable GHashTable *devices = NULL;
|
|
|
|
|
|
|
|
|
|
nm_assert (out_n_slaves);
|
2015-05-06 14:38:55 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
s_con = nm_connection_get_setting_connection (nm_settings_connection_get_connection (sett_conn));
|
2017-02-24 09:32:05 +01:00
|
|
|
g_return_val_if_fail (s_con, NULL);
|
|
|
|
|
|
2017-11-15 16:06:43 +01:00
|
|
|
devices = g_hash_table_new (nm_direct_hash, NULL);
|
2015-05-06 14:38:55 +02:00
|
|
|
|
|
|
|
|
/* Search through all connections, not only inactive ones, because
|
|
|
|
|
* even if a slave was already active, it might be deactivated during
|
|
|
|
|
* master reactivation.
|
|
|
|
|
*/
|
2017-11-22 12:46:58 +01:00
|
|
|
all_connections = nm_settings_get_connections_clone (priv->settings, &n_all_connections,
|
|
|
|
|
NULL, NULL,
|
|
|
|
|
nm_settings_connection_cmp_autoconnect_priority_p_with_data, NULL);
|
2017-02-24 09:32:05 +01:00
|
|
|
for (i = 0; i < n_all_connections; i++) {
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *master_connection = NULL;
|
2017-02-24 09:32:05 +01:00
|
|
|
NMDevice *master_device = NULL, *slave_device;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *candidate = all_connections[i];
|
|
|
|
|
|
|
|
|
|
find_master (manager,
|
|
|
|
|
nm_settings_connection_get_connection (candidate),
|
|
|
|
|
NULL,
|
|
|
|
|
&master_connection,
|
|
|
|
|
&master_device,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL);
|
|
|
|
|
if ( (master_connection && master_connection == sett_conn)
|
2015-05-06 14:38:55 +02:00
|
|
|
|| (master_device && master_device == device)) {
|
2017-02-24 09:32:05 +01:00
|
|
|
slave_device = nm_manager_get_best_device_for_connection (manager,
|
|
|
|
|
candidate,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
NULL,
|
2017-02-24 09:32:05 +01:00
|
|
|
FALSE,
|
2018-07-10 11:50:21 +02:00
|
|
|
devices,
|
|
|
|
|
NULL);
|
2017-02-24 09:32:05 +01:00
|
|
|
|
|
|
|
|
if (!slaves) {
|
|
|
|
|
/* what we allocate is quite likely much too large. Don't bother, it is only
|
|
|
|
|
* a temporary buffer. */
|
|
|
|
|
slaves = g_new (SlaveConnectionInfo, n_all_connections);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nm_assert (n_slaves < n_all_connections);
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
slaves[n_slaves].connection = candidate,
|
2017-02-24 09:32:05 +01:00
|
|
|
slaves[n_slaves].device = slave_device,
|
|
|
|
|
n_slaves++;
|
|
|
|
|
|
|
|
|
|
if (slave_device)
|
|
|
|
|
g_hash_table_add (devices, slave_device);
|
2015-05-06 14:38:55 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
*out_n_slaves = n_slaves;
|
|
|
|
|
|
|
|
|
|
/* Warning: returns NULL if n_slaves is zero. */
|
|
|
|
|
return slaves;
|
2015-05-06 14:38:55 +02:00
|
|
|
}
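The "devices" hash table in find_slaves() is used purely as a set of pointers,
so that a device already claimed for one slave is not offered to the next one.
A self-contained GLib sketch of that idiom (using plain g_direct_hash() here
instead of NM's nm_direct_hash(); names are invented):

static gboolean
example_seen_before (GHashTable *seen, gpointer item)
{
	if (g_hash_table_contains (seen, item))
		return TRUE;
	g_hash_table_add (seen, item);   /* keys only, no values */
	return FALSE;
}

static void
example_usage (void)
{
	gs_unref_hashtable GHashTable *seen = NULL;

	seen = g_hash_table_new (g_direct_hash, g_direct_equal);
	example_seen_before (seen, GINT_TO_POINTER (1));   /* FALSE: first time */
	example_seen_before (seen, GINT_TO_POINTER (1));   /* TRUE: already added */
}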
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
should_connect_slaves (NMConnection *connection, NMDevice *device)
|
|
|
|
|
{
|
|
|
|
|
NMSettingConnection *s_con;
|
2018-09-05 09:24:33 +02:00
|
|
|
NMSettingConnectionAutoconnectSlaves val;
|
2015-05-06 14:38:55 +02:00
|
|
|
|
|
|
|
|
s_con = nm_connection_get_setting_connection (connection);
|
|
|
|
|
g_assert (s_con);
|
|
|
|
|
|
2018-09-05 09:24:33 +02:00
|
|
|
val = nm_setting_connection_get_autoconnect_slaves (s_con);
|
|
|
|
|
if (val != NM_SETTING_CONNECTION_AUTOCONNECT_SLAVES_DEFAULT)
|
2015-05-06 14:38:55 +02:00
|
|
|
goto out;
|
|
|
|
|
|
2018-09-05 09:24:33 +02:00
|
|
|
val = nm_config_data_get_connection_default_int64 (NM_CONFIG_GET_DATA,
|
2018-11-14 14:52:21 +01:00
|
|
|
NM_CON_DEFAULT ("connection.autoconnect-slaves"),
|
2018-09-05 09:24:33 +02:00
|
|
|
device,
|
|
|
|
|
0, 1, -1);
|
2015-05-06 14:38:55 +02:00
|
|
|
|
|
|
|
|
out:
|
2018-09-05 09:24:33 +02:00
|
|
|
if (val == NM_SETTING_CONNECTION_AUTOCONNECT_SLAVES_NO)
|
2015-05-06 14:38:55 +02:00
|
|
|
return FALSE;
|
2018-09-05 09:24:33 +02:00
|
|
|
if (val == NM_SETTING_CONNECTION_AUTOCONNECT_SLAVES_YES)
|
2015-05-06 14:38:55 +02:00
|
|
|
return TRUE;
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
all: don't use gchar/gshort/gint/glong but C types
We commonly don't use the glib typedefs for char/short/int/long,
but their C types directly.
$ git grep '\<g\(char\|short\|int\|long\|float\|double\)\>' | wc -l
587
$ git grep '\<\(char\|short\|int\|long\|float\|double\)\>' | wc -l
21114
One could argue that using the glib typedefs is preferable in
public API (of our glib based libnm library) or where it clearly
is related to glib, like during
g_object_set (obj, PROPERTY, (gint) value, NULL);
However, that argument does not seem strong, because in practice we don't
follow that argument today, and seldom use the glib typedefs.
Also, the style guide for this would be hard to formalize, because
"using them where clearly related to a glib" is a very loose suggestion.
Also note that glib typedefs will always just be typedefs of the
underlying C types. There is no danger of glib changing the meaning
of these typedefs (because that would be a major API break of glib).
A simple style guide is instead: don't use these typedefs.
No manual actions, I only ran the bash script:
FILES=($(git ls-files '*.[hc]'))
sed -i \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>\( [^ ]\)/\1\2/g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\> /\1 /g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>/\1/g' \
"${FILES[@]}"
2018-07-11 07:40:19 +02:00
|
|
|
static int
|
2017-05-15 17:17:26 +02:00
|
|
|
compare_slaves (gconstpointer a, gconstpointer b, gpointer sort_by_name)
|
2017-03-02 16:14:18 +01:00
|
|
|
{
|
|
|
|
|
const SlaveConnectionInfo *a_info = a;
|
|
|
|
|
const SlaveConnectionInfo *b_info = b;
|
|
|
|
|
|
|
|
|
|
/* Slaves without a device at the end */
|
|
|
|
|
if (!a_info->device)
|
|
|
|
|
return 1;
|
|
|
|
|
if (!b_info->device)
|
|
|
|
|
return -1;
|
|
|
|
|
|
2017-05-15 17:17:26 +02:00
|
|
|
if (GPOINTER_TO_INT (sort_by_name)) {
|
|
|
|
|
return g_strcmp0 (nm_device_get_iface (a_info->device),
|
|
|
|
|
nm_device_get_iface (b_info->device));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return nm_device_get_ifindex (a_info->device) - nm_device_get_ifindex (b_info->device);
|
2017-03-02 16:14:18 +01:00
|
|
|
}
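compare_slaves() is a GCompareDataFunc that g_qsort_with_data() below calls
with the sort-by-name flag as user data. A self-contained sketch of that
pairing with a trivial integer comparison (names are invented):

static int
example_cmp_int (gconstpointer a, gconstpointer b, gpointer user_data)
{
	const int *ia = a;
	const int *ib = b;

	/* user_data could carry a flag, like sort_by_name above */
	return (*ia > *ib) - (*ia < *ib);
}

static void
example_sort (void)
{
	int values[] = { 3, 1, 2 };

	g_qsort_with_data (values, G_N_ELEMENTS (values), sizeof (values[0]),
	                   example_cmp_int, NULL);
}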
|
|
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
static void
|
2016-03-02 11:38:26 +01:00
|
|
|
autoconnect_slaves (NMManager *self,
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *master_connection,
|
2015-05-06 14:38:55 +02:00
|
|
|
NMDevice *master_device,
|
|
|
|
|
NMAuthSubject *subject)
|
|
|
|
|
{
|
|
|
|
|
GError *local_err = NULL;
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (should_connect_slaves (nm_settings_connection_get_connection (master_connection),
|
|
|
|
|
master_device)) {
|
2017-02-24 09:32:05 +01:00
|
|
|
gs_free SlaveConnectionInfo *slaves = NULL;
|
|
|
|
|
guint i, n_slaves = 0;
|
2015-05-06 14:38:55 +02:00
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
slaves = find_slaves (self, master_connection, master_device, &n_slaves);
|
2017-03-02 16:14:18 +01:00
|
|
|
if (n_slaves > 1) {
|
2017-10-24 08:35:42 +02:00
|
|
|
gs_free char *value = NULL;
|
|
|
|
|
|
|
|
|
|
value = nm_config_data_get_value (NM_CONFIG_GET_DATA,
|
|
|
|
|
NM_CONFIG_KEYFILE_GROUP_MAIN,
|
|
|
|
|
NM_CONFIG_KEYFILE_KEY_MAIN_SLAVES_ORDER,
|
|
|
|
|
NM_CONFIG_GET_VALUE_STRIP);
|
2017-03-02 16:14:18 +01:00
|
|
|
g_qsort_with_data (slaves, n_slaves, sizeof (slaves[0]),
|
2017-05-15 17:17:26 +02:00
|
|
|
compare_slaves,
|
|
|
|
|
GINT_TO_POINTER (!nm_streq0 (value, "index")));
|
2017-03-02 16:14:18 +01:00
|
|
|
}
|
2015-05-06 14:38:55 +02:00
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
for (i = 0; i < n_slaves; i++) {
|
|
|
|
|
SlaveConnectionInfo *slave = &slaves[i];
|
2016-09-26 16:01:27 +02:00
|
|
|
const char *uuid;
|
|
|
|
|
|
|
|
|
|
/* To avoid loops when autoconnecting slaves, we propagate
|
|
|
|
|
* the UUID of the initial connection down to slaves until
|
|
|
|
|
* the same connection is found.
|
|
|
|
|
*/
|
|
|
|
|
uuid = g_object_get_qdata (G_OBJECT (master_connection),
|
|
|
|
|
autoconnect_root_quark ());
|
2017-02-24 09:32:05 +01:00
|
|
|
if (nm_streq0 (nm_settings_connection_get_uuid (slave->connection), uuid)) {
|
2016-09-26 16:01:27 +02:00
|
|
|
_LOGI (LOGD_CORE,
|
|
|
|
|
"will NOT activate slave connection '%s' (%s) as a dependency for master '%s' (%s): "
|
|
|
|
|
"circular dependency detected",
|
2017-02-24 09:32:05 +01:00
|
|
|
nm_settings_connection_get_id (slave->connection),
|
|
|
|
|
nm_settings_connection_get_uuid (slave->connection),
|
2016-09-26 16:01:27 +02:00
|
|
|
nm_settings_connection_get_id (master_connection),
|
|
|
|
|
nm_settings_connection_get_uuid (master_connection));
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!uuid)
|
|
|
|
|
uuid = nm_settings_connection_get_uuid (master_connection);
|
2017-02-24 09:32:05 +01:00
|
|
|
g_object_set_qdata_full (G_OBJECT (slave->connection),
|
2016-09-26 16:01:27 +02:00
|
|
|
autoconnect_root_quark (),
|
|
|
|
|
g_strdup (uuid),
|
|
|
|
|
g_free);
|
|
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
if (!slave->device) {
|
2017-02-23 18:15:10 +01:00
|
|
|
_LOGD (LOGD_CORE,
|
|
|
|
|
"will NOT activate slave connection '%s' (%s) as a dependency for master '%s' (%s): "
|
|
|
|
|
"no compatible device found",
|
2017-02-24 09:32:05 +01:00
|
|
|
nm_settings_connection_get_id (slave->connection),
|
|
|
|
|
nm_settings_connection_get_uuid (slave->connection),
|
2017-02-23 18:15:10 +01:00
|
|
|
nm_settings_connection_get_id (master_connection),
|
|
|
|
|
nm_settings_connection_get_uuid (master_connection));
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "will activate slave connection '%s' (%s) as a dependency for master '%s' (%s)",
|
2017-02-24 09:32:05 +01:00
|
|
|
nm_settings_connection_get_id (slave->connection),
|
|
|
|
|
nm_settings_connection_get_uuid (slave->connection),
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_settings_connection_get_id (master_connection),
|
|
|
|
|
nm_settings_connection_get_uuid (master_connection));
|
2015-05-06 14:38:55 +02:00
|
|
|
|
|
|
|
|
/* Schedule slave activation */
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_manager_activate_connection (self,
|
2017-02-24 09:32:05 +01:00
|
|
|
slave->connection,
|
2015-05-06 14:38:55 +02:00
|
|
|
NULL,
|
2016-09-07 17:47:26 +02:00
|
|
|
NULL,
|
2017-02-24 09:32:05 +01:00
|
|
|
slave->device,
|
2015-05-06 14:38:55 +02:00
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
2018-03-28 17:18:04 +02:00
|
|
|
NM_ACTIVATION_REASON_AUTOCONNECT_SLAVES,
|
2015-05-06 14:38:55 +02:00
|
|
|
&local_err);
|
|
|
|
|
if (local_err) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGW (LOGD_CORE, "Slave connection activation failed: %s", local_err->message);
|
2017-02-24 12:31:20 +01:00
|
|
|
g_clear_error (&local_err);
|
2015-05-06 14:38:55 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
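The loop avoidance above relies on tagging the connection object with the UUID
of the autoconnect root via GObject qdata. A self-contained GLib sketch of the
technique (quark and function names are invented; autoconnect_root_quark() in
this file plays the same role):

static GQuark
example_root_quark (void)
{
	static GQuark quark;

	if (G_UNLIKELY (quark == 0))
		quark = g_quark_from_static_string ("example-autoconnect-root");
	return quark;
}

static void
example_tag_root (GObject *object, const char *root_uuid)
{
	/* g_free() is called on the old value when the tag is replaced or
	 * the object is finalized */
	g_object_set_qdata_full (object, example_root_quark (),
	                         g_strdup (root_uuid), g_free);
}

static const char *
example_get_root (GObject *object)
{
	return g_object_get_qdata (object, example_root_quark ());
}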
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
static gboolean
|
|
|
|
|
_internal_activate_vpn (NMManager *self, NMActiveConnection *active, GError **error)
|
2012-02-23 10:04:41 -06:00
|
|
|
{
|
2018-02-05 15:17:06 +01:00
|
|
|
nm_assert (NM_IS_VPN_CONNECTION (active));
|
2012-09-14 15:21:29 -05:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in and idle handle, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_export (NM_DBUS_OBJECT (active));
|
2018-02-05 15:17:06 +01:00
|
|
|
if (!nm_vpn_manager_activate_connection (NM_MANAGER_GET_PRIVATE (self)->vpn_manager,
|
|
|
|
|
NM_VPN_CONNECTION (active),
|
|
|
|
|
error)) {
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_unexport (NM_DBUS_OBJECT (active));
|
2018-02-05 15:17:06 +01:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
manager: unexport VPN connections when the activation fails early
When a VPN connection can't be activated we have to unexport and
dispose it. Commit f2182fbf9b24 ("core: don't emit double
PropertiesChanged signal for new active connections") removed the call
to nm_exported_object_unexport() in case of failure because the active
connection already gets unreferenced on failure.
However, an exported object can't be disposed until it's explicitly
unexported because GDBus code keeps a reference to it. The result was
that the active connection was kept alive and exported, but without
explicit references to it. As soon as the connection was unexported,
it was also automatically disposed, causing issues like:
(src/nm-exported-object.c:1025):dispose: code should not be reached
#0 _g_log_abort () at /lib64/libglib-2.0.so.0
#1 g_logv () at /lib64/libglib-2.0.so.0
#2 g_log () at /lib64/libglib-2.0.so.0
#3 g_warn_message () at /lib64/libglib-2.0.so.0
#4 dispose (object=0xaaf110) at src/nm-exported-object.c:1025
#5 dispose (object=0xaaf110) at src/nm-active-connection.c:1246
#6 dispose (object=0xaaf110) at src/vpn/nm-vpn-connection.c:2642
#7 g_object_unref () at /lib64/libgobject-2.0.so.0
#8 registration_data_free () at /lib64/libgio-2.0.so.0
#9 g_hash_table_remove_internal () at /lib64/libglib-2.0.so.0
#10 g_dbus_object_manager_server_unexport_unlocked () at /lib64/libgio-2.0.so.0
#11 g_dbus_object_manager_server_unexport () at /lib64/libgio-2.0.so.0
#12 nm_bus_manager_unregister_object (self=0x9069e0, object=object@entry=0xaaf110) at src/nm-bus-manager.c:858
#13 nm_exported_object_unexport (self=0xaaf110) at src/nm-exported-object.c:714
#14 _settings_connection_removed (connection=<optimized out>, user_data=0xaaf110) at src/nm-active-connection.c:184
#15 g_closure_invoke () at /lib64/libgobject-2.0.so.0
#16 signal_emit_unlocked_R () at /lib64/libgobject-2.0.so.0
#17 g_signal_emit_valist () at /lib64/libgobject-2.0.so.0
#18 g_signal_emit_by_name () at /lib64/libgobject-2.0.so.0
#19 nm_settings_connection_signal_remove (self=self@entry=0x9e4a80, allow_reuse=allow_reuse@entry=0) at src/settings/nm-settings-connection.c:2085
#20 do_delete (self=0x9e4a80, callback=0x58106a <con_delete_cb>, user_data=0xa84fa0) at src/settings/nm-settings-connection.c:768
#21 do_delete (connection=0x9e4a80, callback=0x58106a <con_delete_cb>, user_data=0xa84fa0) at src/settings/plugins/keyfile/nms-keyfile-connection.c:127
#22 nm_settings_connection_delete (self=self@entry=0x9e4a80, callback=callback@entry=0x58106a <con_delete_cb>, user_data=0xa84fa0) at src/settings/nm-settings-connection.c:694
#23 delete_auth_cb (self=self@entry=0x9e4a80, context=context@entry=0x7fffd80131e0, subject=0x91fb40, error=<optimized out>, data=data@entry=0x0) at src/settings/nm-settings-connection.c:1879
#24 pk_auth_cb (chain=0x7fffd00024a0, chain_error=<optimized out>, context=0x7fffd80131e0, user_data=<optimized out>) at src/settings/nm-settings-connection.c:1351
#25 auth_chain_finish (user_data=0x7fffd00024a0) at src/nm-auth-utils.c:92
#26 g_idle_dispatch () at /lib64/libglib-2.0.so.0
Restore the unexport upon failure to fix this.
Fixes: f2182fbf9b2423bd8509b2f0cf218edd96dac32c
https://bugzilla.redhat.com/show_bug.cgi?id=1440077
(cherry picked from commit 69fd96118e9a5e6b613644c2cb61911d554e7f3b)
2017-04-08 09:43:42 +02:00
|
|
|
|
2018-02-05 15:17:06 +01:00
|
|
|
active_connection_add (self, active);
|
|
|
|
|
return TRUE;
|
2012-02-23 10:04:41 -06:00
|
|
|
}
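For the lower-layer D-Bus approach described in the "core/dbus: rework" commit
message above, this is a self-contained sketch of exporting an object directly
on a GDBusConnection with a static GDBusInterfaceInfo. It is plain GIO, not
the NMDBusObject/NMDBusManager glue, and all names are invented:

#include <gio/gio.h>

static GDBusArgInfo arg_result = { -1, "result", "s", NULL };
static GDBusArgInfo *ping_out_args[] = { &arg_result, NULL };
static GDBusMethodInfo method_ping = { -1, "Ping", NULL, ping_out_args, NULL };
static GDBusMethodInfo *iface_methods[] = { &method_ping, NULL };
static GDBusInterfaceInfo iface_info = { -1, "org.example.Demo", iface_methods, NULL, NULL, NULL };

static void
example_method_call (GDBusConnection *connection,
                     const char *sender,
                     const char *object_path,
                     const char *interface_name,
                     const char *method_name,
                     GVariant *parameters,
                     GDBusMethodInvocation *invocation,
                     gpointer user_data)
{
	/* a single hard-coded method, answering "Ping" with "pong" */
	g_dbus_method_invocation_return_value (invocation, g_variant_new ("(s)", "pong"));
}

static const GDBusInterfaceVTable example_vtable = {
	.method_call = example_method_call,
};

static guint
example_export (GDBusConnection *connection, GError **error)
{
	/* returns a registration id (0 on error); drop it again with
	 * g_dbus_connection_unregister_object() */
	return g_dbus_connection_register_object (connection,
	                                          "/org/example/Demo",
	                                          &iface_info,
	                                          &example_vtable,
	                                          NULL, NULL, error);
}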
|
|
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
/* Traverse the device to disconnected state. This means that the device is ready
|
|
|
|
|
* for connection and will proceed with activation if there's an activation request
|
|
|
|
|
* enqueued.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
unmanaged_to_disconnected (NMDevice *device)
|
|
|
|
|
{
|
|
|
|
|
/* when creating the software device, it can happen that the device is
|
|
|
|
|
* still unmanaged by NM_UNMANAGED_PLATFORM_INIT because we didn't yet
|
|
|
|
|
* get the udev event. At this point, we can no longer delay the activation
|
|
|
|
|
* and force the device to be managed. */
|
|
|
|
|
nm_device_set_unmanaged_by_flags (device, NM_UNMANAGED_PLATFORM_INIT, FALSE, NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
|
|
|
|
|
|
|
|
|
nm_device_set_unmanaged_by_flags (device, NM_UNMANAGED_USER_EXPLICIT, FALSE, NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
|
|
|
|
|
core: don't require manageable device in unmanaged_to_disconnected()
It seems the assert there is too strict. I don't really understand why
it fails, but I also don't see why the assert is supposed to hold.
Just return in case the device is unmanageable at this point.
The activation shall fail later.
Traceback from a test build of commit a7aca2ab08abcc5bee02f0f6f9ffe899919f4234:
#0 0x00007fdb28ffb643 in g_logv (log_domain=0x7fdb2b584cc9 "NetworkManager", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fff10630200) at gmessages.c:1086
#1 0x00007fdb28ffb7bf in g_log (log_domain=log_domain@entry=0x7fdb2b584cc9 "NetworkManager", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7fdb29069190 "%s: assertion '%s' failed") at gmessages.c:1119
#2 0x00007fdb28ffb7f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7fdb2b584cc9 "NetworkManager", pretty_function=pretty_function@entry=0x7fdb2b54fee0 <__func__.38922> "unmanaged_to_disconnected", expression=expression@entry=0x7fdb2b54d450 "nm_device_get_managed (device, FALSE)") at gmessages.c:1128
#3 0x00007fdb2b36e05b in unmanaged_to_disconnected (device=device@entry=0x7fdb2d2384f0 [NMDeviceVlan]) at src/nm-manager.c:3201
#4 0x00007fdb2b37eb3a in _internal_activate_generic (error=0x7fff106303d0, active=0x7fdb2d1d4550 [NMActRequest], self=0x0) at src/nm-manager.c:3430
#5 0x00007fdb2b37eb3a in _internal_activate_generic (self=self@entry=0x7fdb2d02b090 [NMManager], active=active@entry=0x7fdb2d1d4550 [NMActRequest], error=error@entry=0x7fff10630450) at src/nm-manager.c:3458
#6 0x00007fdb2b37fe90 in _activation_auth_done (active=0x7fdb2d1d4550 [NMActRequest], success=1, error_desc=0x0, user_data1=0x7fdb2d02b090, user_data2=0x7fdb0800bec0) at src/nm-manager.c:3866
#7 0x00007fdb2b4cc9d7 in auth_done (chain=0x7fdb2d17de30, error=0x0, unused=<optimized out>, user_data=<optimized out>) at src/nm-active-connection.c:929
#8 0x00007fdb2b4d6884 in auth_chain_finish (user_data=0x7fdb2d17de30) at src/nm-auth-utils.c:92
#9 0x00007fdb28ff4d7a in g_main_context_dispatch (context=0x7fdb2cff2e00) at gmain.c:3152
#10 0x00007fdb28ff4d7a in g_main_context_dispatch (context=context@entry=0x7fdb2cff2e00) at gmain.c:3767
#11 0x00007fdb28ff50b8 in g_main_context_iterate (context=0x7fdb2cff2e00, block=block@entry=1, dispatch=dispatch@entry=1, self=<optimized out>) at gmain.c:3838
#12 0x00007fdb28ff538a in g_main_loop_run (loop=0x7fdb2cff2ec0) at gmain.c:4032
#13 0x00007fdb2b349ed7 in main (argc=1, argv=0x7fff106307a8) at src/main.c:438
https://bugzilla.redhat.com/show_bug.cgi?id=1478911
2017-09-04 13:11:08 +02:00
|
|
|
if (!nm_device_get_managed (device, FALSE)) {
|
|
|
|
|
/* the device is still marked as unmanaged. Nothing to do. */
|
|
|
|
|
return;
|
|
|
|
|
}
|
2016-03-24 15:20:44 +01:00
|
|
|
|
|
|
|
|
if (nm_device_get_state (device) == NM_DEVICE_STATE_UNMANAGED) {
|
|
|
|
|
nm_device_state_changed (device,
|
2016-10-25 15:27:57 +02:00
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
2016-03-24 15:20:44 +01:00
|
|
|
}
|
|
|
|
|
|
2018-02-05 15:01:48 +01:00
|
|
|
if ( nm_device_get_state (device) == NM_DEVICE_STATE_UNAVAILABLE
|
|
|
|
|
&& nm_device_is_available (device, NM_DEVICE_CHECK_DEV_AVAILABLE_FOR_USER_REQUEST)) {
|
2016-03-24 15:20:44 +01:00
|
|
|
nm_device_state_changed (device,
|
2016-10-25 15:27:57 +02:00
|
|
|
NM_DEVICE_STATE_DISCONNECTED,
|
|
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
2016-03-24 15:20:44 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
/* The parent connection is ready; we can proceed realizing the device and
|
|
|
|
|
* progressing the device to the disconnected state.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
active_connection_parent_active (NMActiveConnection *active,
|
|
|
|
|
NMActiveConnection *parent_ac,
|
|
|
|
|
NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMDevice *device = nm_active_connection_get_device (active);
|
|
|
|
|
GError *error = NULL;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to track the
settings of the profile itself.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates keeping the actual profile.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
an NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware of whether we handle an NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as the applied-connection, all it can do is clone the entire
NMSettingsConnection as an NMSimpleConnection.
Likewise, when we get an NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who else references and modifies the instance.
By separating NMSettingsConnection we could in the future make
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
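The delegation described above can be illustrated with a small standalone sketch. The types below (Connection, SettingsConnection) are illustrative stand-ins, not the daemon's GObject types; only the accessor's role mirrors nm_settings_connection_get_connection(), which the code in this file calls.

/* Minimal sketch of delegation instead of inheritance (illustrative types
 * only): the settings object no longer "is-a" connection; it owns one and
 * hands it out through an accessor. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for NMSimpleConnection: only holds the profile data */
typedef struct {
    char *id;
} Connection;

/* stand-in for NMSettingsConnection: owns a Connection by delegation;
 * persistence details stay here, not in the Connection itself */
typedef struct {
    Connection *conn;
    char *filename;
} SettingsConnection;

static Connection *
connection_new (const char *id)
{
    Connection *c = calloc (1, sizeof (*c));

    c->id = strdup (id);
    return c;
}

static SettingsConnection *
settings_connection_new (const char *id, const char *filename)
{
    SettingsConnection *sc = calloc (1, sizeof (*sc));

    sc->conn = connection_new (id);
    sc->filename = strdup (filename);
    return sc;
}

/* accessor mirroring the role of nm_settings_connection_get_connection():
 * callers that only need the profile get the small delegate, not the whole
 * settings object */
static Connection *
settings_connection_get_connection (SettingsConnection *sc)
{
    return sc->conn;
}

int
main (void)
{
    SettingsConnection *sc = settings_connection_new ("eth0-profile", "/etc/profiles/eth0");
    Connection *c = settings_connection_get_connection (sc);

    printf ("profile id: %s (stored in %s)\n", c->id, sc->filename);

    free (sc->conn->id);
    free (sc->conn);
    free (sc->filename);
    free (sc);
    return 0;
}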
|
|
|
NMSettingsConnection *sett_conn;
|
2018-02-05 15:51:33 +01:00
|
|
|
NMDevice *parent;
|
2016-03-24 15:20:44 +01:00
|
|
|
|
|
|
|
|
g_signal_handlers_disconnect_by_func (active,
|
|
|
|
|
(GCallback) active_connection_parent_active,
|
|
|
|
|
self);
|
|
|
|
|
|
2018-02-05 15:51:33 +01:00
|
|
|
if (!parent_ac) {
|
2016-04-27 18:26:39 +02:00
|
|
|
_LOGW (LOGD_CORE, "The parent connection device '%s' depended on disappeared.",
|
|
|
|
|
nm_device_get_iface (device));
|
2018-02-05 16:45:33 +01:00
|
|
|
nm_active_connection_set_state_fail (active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REMOVED,
|
|
|
|
|
"parent device disappeared");
|
2018-02-05 15:51:33 +01:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_active_connection_get_settings_connection (active);
|
2018-02-05 15:51:33 +01:00
|
|
|
parent = nm_active_connection_get_device (parent_ac);
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_device_create_and_realize (device,
|
|
|
|
|
nm_settings_connection_get_connection (sett_conn),
|
|
|
|
|
parent,
|
|
|
|
|
&error)) {
|
2018-02-05 15:51:33 +01:00
|
|
|
_LOGW (LOGD_CORE, "Could not realize device '%s': %s",
|
|
|
|
|
nm_device_get_iface (device), error->message);
|
2018-02-05 16:45:33 +01:00
|
|
|
nm_active_connection_set_state_fail (active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REALIZE_FAILED,
|
|
|
|
|
"failure to realize device");
|
2018-02-05 15:51:33 +01:00
|
|
|
return;
|
2016-03-24 15:20:44 +01:00
|
|
|
}
|
2018-02-05 15:51:33 +01:00
|
|
|
|
|
|
|
|
/* We can now proceed to disconnected state so that activation proceeds. */
|
|
|
|
|
unmanaged_to_disconnected (device);
|
2016-03-24 15:20:44 +01:00
|
|
|
}
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
static gboolean
|
|
|
|
|
_internal_activate_device (NMManager *self, NMActiveConnection *active, GError **error)
|
2008-03-26 13:43:01 +00:00
|
|
|
{
|
core: rework nm_device_steal_connection()
nm_device_steal_connection() was a bit misleading. It only had one caller,
and what _internal_activate_device() really wants is to deactivate all
other active-connections for the same connection. Hence, it already
performed a lookup for the active-connection that should be disconnected,
only to then look up the device and tell it to steal the connection.
Note that if existing_ac happens to be neither the queued nor the current
active connection, then previously it would have done nothing. It's
unclear exactly when that can happen; however, we can avoid that
question entirely.
Instead of having steal-connection(), have a disconnect-active-connection().
If there is no matching device, it will just set the active-connection's
state to DISCONNECTED, which in turn does nothing if the state is
already DISCONNECTED.
2018-04-22 12:13:44 +02:00
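A minimal sketch of that disconnect-by-profile idea follows. All types and names here are hypothetical stand-ins, not the daemon's API; only the behavior (deactivate every other user of the same profile, and mark device-less ones disconnected directly) follows the commit message above.

/* Illustrative sketch: deactivate every other active connection that uses
 * the same profile; if one has no device, just mark it DISCONNECTED. */
#include <stdio.h>
#include <string.h>

typedef enum { AC_STATE_ACTIVATED, AC_STATE_DISCONNECTED } AcState;

typedef struct {
    const char *profile_id;   /* which settings-connection this AC uses */
    const char *device;       /* NULL if no device is associated */
    AcState state;
} ActiveConnection;

static void
disconnect_active_connections_for_profile (ActiveConnection *acs, int n_acs,
                                           const char *profile_id,
                                           const ActiveConnection *keep)
{
    int i;

    for (i = 0; i < n_acs; i++) {
        ActiveConnection *ac = &acs[i];

        /* skip the AC being activated and anything already disconnected */
        if (ac == keep || ac->state == AC_STATE_DISCONNECTED)
            continue;
        if (strcmp (ac->profile_id, profile_id) != 0)
            continue;

        if (ac->device)
            printf ("disconnecting '%s' on device %s\n", ac->profile_id, ac->device);
        else
            printf ("no device; marking '%s' disconnected directly\n", ac->profile_id);
        ac->state = AC_STATE_DISCONNECTED;
    }
}

int
main (void)
{
    ActiveConnection acs[] = {
        { "home-wifi", "wlan0", AC_STATE_ACTIVATED },
        { "home-wifi", NULL,    AC_STATE_ACTIVATED },
        { "office",    "eth0",  AC_STATE_ACTIVATED },
    };

    /* keep acs[0] (the one being activated); disconnect other users of the profile */
    disconnect_active_connections_for_profile (acs, 3, "home-wifi", &acs[0]);
    return 0;
}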
|
|
|
NMDevice *device, *master_device = NULL;
|
2015-07-14 16:53:24 +02:00
|
|
|
NMConnection *applied;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn;
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *master_connection = NULL;
|
2014-10-15 21:17:45 -05:00
|
|
|
NMConnection *existing_connection = NULL;
|
2014-01-28 17:24:26 -05:00
|
|
|
NMActiveConnection *master_ac = NULL;
|
2014-10-15 21:17:45 -05:00
|
|
|
NMAuthSubject *subject;
|
2018-07-10 11:17:05 +02:00
|
|
|
GError *local = NULL;
|
2018-04-18 11:08:05 +02:00
|
|
|
NMConnectionMultiConnect multi_connect;
|
2008-03-26 13:43:01 +00:00
|
|
|
|
2013-10-31 14:13:33 +01:00
|
|
|
g_return_val_if_fail (NM_IS_MANAGER (self), FALSE);
|
|
|
|
|
g_return_val_if_fail (NM_IS_ACTIVE_CONNECTION (active), FALSE);
|
|
|
|
|
g_return_val_if_fail (error == NULL || *error == NULL, FALSE);
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
g_assert (NM_IS_VPN_CONNECTION (active) == FALSE);
|
2008-03-26 13:43:01 +00:00
|
|
|
|
2018-04-12 09:48:16 +02:00
|
|
|
device = nm_active_connection_get_device (active);
|
|
|
|
|
g_return_val_if_fail (device != NULL, FALSE);
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_active_connection_get_settings_connection (active);
|
|
|
|
|
nm_assert (sett_conn);
|
2009-10-16 11:52:27 -07:00
|
|
|
|
2015-07-14 16:53:24 +02:00
|
|
|
applied = nm_active_connection_get_applied_connection (active);
|
|
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* If the device is active and its connection is not visible to the
|
|
|
|
|
* user that's requesting this new activation, fail, since other users
|
|
|
|
|
* should not be allowed to implicitly deactivate private connections
|
|
|
|
|
* by activating a connection of their own.
|
|
|
|
|
*/
|
|
|
|
|
existing_connection = nm_device_get_applied_connection (device);
|
|
|
|
|
subject = nm_active_connection_get_subject (active);
|
2018-04-12 09:48:16 +02:00
|
|
|
if ( existing_connection
|
|
|
|
|
&& !nm_auth_is_subject_in_acl_set_error (existing_connection,
|
|
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
error)) {
|
|
|
|
|
g_prefix_error (error, "Private connection already active on the device: ");
|
2014-10-15 21:17:45 -05:00
|
|
|
return FALSE;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
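The visibility check above can be thought of as a simple ACL membership test. The sketch below is illustrative only (it is not nm_auth_is_subject_in_acl_set_error() and the names are hypothetical): a profile restricted to specific users is visible to the requester only if the requester is listed, while an empty permissions list means the profile is visible to everyone.

/* Illustrative ACL sketch, not the daemon's implementation. */
#include <stdio.h>
#include <string.h>

static int
user_in_acl (const char *user, const char *permissions[], int n_permissions)
{
    int i;

    if (n_permissions == 0)
        return 1;   /* unrestricted profile: visible to all users */

    for (i = 0; i < n_permissions; i++) {
        if (strcmp (permissions[i], user) == 0)
            return 1;
    }
    return 0;
}

int
main (void)
{
    const char *acl[] = { "alice" };

    printf ("alice allowed: %d\n", user_in_acl ("alice", acl, 1));
    printf ("bob allowed:   %d\n", user_in_acl ("bob",   acl, 1));
    printf ("anyone on an unrestricted profile: %d\n", user_in_acl ("bob", NULL, 0));
    return 0;
}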
|
|
|
|
|
|
2013-11-04 19:51:28 -06:00
|
|
|
/* Final connection must be available on device */
|
2018-07-10 11:17:05 +02:00
|
|
|
if (!nm_device_check_connection_available (device, applied, NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST, NULL, &local)) {
|
2013-11-04 19:51:28 -06:00
|
|
|
g_set_error (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
|
2018-07-10 11:17:05 +02:00
|
|
|
"Connection '%s' is not available on device %s because %s",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id (sett_conn),
|
2018-07-10 11:17:05 +02:00
|
|
|
nm_device_get_iface (device),
|
|
|
|
|
local->message);
|
|
|
|
|
g_error_free (local);
|
2013-08-28 16:19:20 -05:00
|
|
|
return FALSE;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
|
|
|
|
|
2018-04-17 07:31:13 +02:00
|
|
|
if (nm_active_connection_get_activation_type (active) == NM_ACTIVATION_TYPE_MANAGED)
|
|
|
|
|
nm_device_sys_iface_state_set (device, NM_DEVICE_SYS_IFACE_STATE_MANAGED);
|
|
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
/* Create any backing resources the device needs */
|
|
|
|
|
if (!nm_device_is_real (device)) {
|
|
|
|
|
NMDevice *parent;
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
parent = find_parent_device_for_connection (self,
|
|
|
|
|
nm_settings_connection_get_connection (sett_conn),
|
|
|
|
|
NULL);
|
2016-03-24 15:20:44 +01:00
|
|
|
|
|
|
|
|
if (parent && !nm_device_is_real (parent)) {
|
|
|
|
|
NMSettingsConnection *parent_con;
|
|
|
|
|
NMActiveConnection *parent_ac;
|
|
|
|
|
|
|
|
|
|
parent_con = nm_device_get_best_connection (parent, NULL, error);
|
|
|
|
|
if (!parent_con) {
|
|
|
|
|
g_prefix_error (error, "%s failed to create parent: ", nm_device_get_iface (device));
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
parent_ac = nm_manager_activate_connection (self,
|
|
|
|
|
parent_con,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL,
|
|
|
|
|
parent,
|
2018-03-28 17:18:04 +02:00
|
|
|
subject,
|
|
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
|
|
|
|
nm_active_connection_get_activation_reason (active),
|
|
|
|
|
error);
|
2016-03-24 15:20:44 +01:00
|
|
|
if (!parent_ac) {
|
|
|
|
|
g_prefix_error (error, "%s failed to activate parent: ", nm_device_get_iface (device));
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* We can't realize now; defer until the parent device is ready. */
|
|
|
|
|
g_signal_connect (active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_PARENT_ACTIVE,
|
|
|
|
|
(GCallback) active_connection_parent_active,
|
|
|
|
|
self);
|
|
|
|
|
nm_active_connection_set_parent (active, parent_ac);
|
|
|
|
|
} else {
|
|
|
|
|
/* We can realize now; no need to wait for a parent device. */
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_device_create_and_realize (device,
|
|
|
|
|
nm_settings_connection_get_connection (sett_conn),
|
|
|
|
|
parent,
|
|
|
|
|
error)) {
|
2016-03-24 15:20:44 +01:00
|
|
|
g_prefix_error (error, "%s failed to create resources: ", nm_device_get_iface (device));
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2014-09-24 16:58:07 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
/* Try to find the master connection/device if the connection has a dependency */
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!find_master (self,
|
|
|
|
|
applied,
|
|
|
|
|
device,
|
|
|
|
|
&master_connection,
|
|
|
|
|
&master_device,
|
|
|
|
|
&master_ac,
|
2016-03-30 09:00:06 +02:00
|
|
|
error)) {
|
|
|
|
|
g_prefix_error (error, "Can not find a master for %s: ",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id (sett_conn));
|
2013-08-28 16:19:20 -05:00
|
|
|
return FALSE;
|
2016-03-30 09:00:06 +02:00
|
|
|
}
|
2012-02-26 17:27:42 -06:00
|
|
|
|
|
|
|
|
/* Ensure there's a master active connection the new connection we're
|
|
|
|
|
* activating can depend on.
|
|
|
|
|
*/
|
|
|
|
|
if (master_connection || master_device) {
|
|
|
|
|
if (master_connection) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "Activation of '%s' requires master connection '%s'",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id (sett_conn),
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_settings_connection_get_id (master_connection));
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
|
|
|
|
if (master_device) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "Activation of '%s' requires master device '%s'",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id (sett_conn),
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_device_get_ip_iface (master_device));
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
/* Ensure that e.g. a bond slave's candidate master really is a bond master */
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if ( master_connection
|
|
|
|
|
&& !is_compatible_with_slave (nm_settings_connection_get_connection (master_connection),
|
|
|
|
|
applied)) {
|
2016-03-30 09:00:06 +02:00
|
|
|
g_set_error (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
|
|
|
|
"The master connection '%s' is not compatible with '%s'",
|
|
|
|
|
nm_settings_connection_get_id (master_connection),
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id (sett_conn));
|
2013-08-28 16:19:20 -05:00
|
|
|
return FALSE;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
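As a rough illustration of the compatibility test used above (the body of is_compatible_with_slave() is not part of this excerpt), a candidate master is compatible when its connection type matches the slave's declared slave-type. The sketch below is a hypothetical standalone version, not the daemon's implementation.

/* Illustrative sketch: a "bond" slave needs a "bond" master, a "bridge"
 * slave needs a "bridge" master, and so on. */
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *id;
    const char *type;        /* e.g. "bond", "bridge", "802-3-ethernet" */
    const char *slave_type;  /* NULL if the profile is not a slave */
} Profile;

static int
master_is_compatible_with_slave (const Profile *master, const Profile *slave)
{
    if (!slave->slave_type)
        return 0;   /* not a slave profile at all */
    return strcmp (master->type, slave->slave_type) == 0;
}

int
main (void)
{
    Profile bond_master = { "bond0-profile", "bond",           NULL   };
    Profile bond_slave  = { "eth0-slave",    "802-3-ethernet", "bond" };
    Profile br_master   = { "br0-profile",   "bridge",         NULL   };

    printf ("bond0 compatible with eth0-slave: %d\n",
            master_is_compatible_with_slave (&bond_master, &bond_slave));
    printf ("br0 compatible with eth0-slave:   %d\n",
            master_is_compatible_with_slave (&br_master, &bond_slave));
    return 0;
}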
|
|
|
|
|
|
|
|
|
|
if (!master_ac) {
|
2014-01-28 17:24:26 -05:00
|
|
|
master_ac = ensure_master_active_connection (self,
|
|
|
|
|
nm_active_connection_get_subject (active),
|
2015-07-14 16:53:24 +02:00
|
|
|
applied,
|
2014-01-28 17:24:26 -05:00
|
|
|
device,
|
|
|
|
|
master_connection,
|
|
|
|
|
master_device,
|
2018-03-28 17:18:04 +02:00
|
|
|
nm_active_connection_get_activation_reason (active),
|
2014-01-28 17:24:26 -05:00
|
|
|
error);
|
|
|
|
|
if (!master_ac) {
|
2016-03-30 09:00:06 +02:00
|
|
|
if (master_device) {
|
|
|
|
|
g_prefix_error (error, "Master device '%s' can't be activated: ",
|
|
|
|
|
nm_device_get_ip_iface (device));
|
|
|
|
|
} else {
|
|
|
|
|
g_prefix_error (error, "Master connection '%s' can't be activated: ",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id (sett_conn));
|
2016-03-30 09:00:06 +02:00
|
|
|
}
|
2014-01-28 17:24:26 -05:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
2008-04-07 Dan Williams <dcbw@redhat.com>
* include/NetworkManager.h
- Remove the DOWN and CANCELLED device states
- Add UNMANAGED and UNAVAILABLE device states
- Document the device states
* introspection/nm-device.xml
src/nm-device-interface.c
src/nm-device-interface.h
- Add the 'managed' property
* test/nm-tool.c
- (detail_device): print out device state
* src/NetworkManagerSystem.h
src/backends/NetworkManagerArch.c
src/backends/NetworkManagerDebian.c
src/backends/NetworkManagerFrugalware.c
src/backends/NetworkManagerGentoo.c
src/backends/NetworkManagerMandriva.c
src/backends/NetworkManagerPaldo.c
src/backends/NetworkManagerRedHat.c
src/backends/NetworkManagerSlackware.c
src/backends/NetworkManagerSuSE.c
- (nm_system_device_get_system_config, nm_system_device_get_disabled
nm_system_device_free_system_config): remove; they were unused and
their functionality should be re-implemented in each distro's
system settings service plugin
* src/nm-gsm-device.c
src/nm-gsm-device.h
src/nm-cdma-device.c
src/nm-cdma-device.h
- (*_new): take the 'managed' argument
* src/nm-device.c
- (nm_device_set_address): remove, fold into nm_device_bring_up()
- (nm_device_init): start in unmanaged state, not disconnected
- (constructor): don't start device until the system settings service
has had a chance to figure out if the device is managed or not
- (nm_device_deactivate, nm_device_bring_up, nm_device_bring_down):
don't set device state here, let callers handle that as appropriate
- (nm_device_dispose): don't touch the device if it's not managed
- (set_property, get_property, nm_device_class_init): implement the
'managed' property
- (nm_device_state_changed): bring the device up if its now managed,
and deactivate it if it used to be active
- (nm_device_get_managed, nm_device_set_managed): do the right thing
with the managed state
* src/nm-hal-manager.c
- (wired_device_creator, wireless_device_creator, modem_device_creator):
take initial managed state and pass it along to device constructors
- (create_device_and_add_to_list): get managed state and pass to
type creators
* src/nm-device-802-11-wireless.c
- (real_can_activate): fold in most of
nm_device_802_11_wireless_can_activate()
- (can_scan): can't scan in UNAVAILABLE or UNMANAGED
- (link_timeout_cb): instead of deactivating, change device state and
let the device state handler to it
- (real_update_hw_address): clean up
- (state_changed_cb): when entering UNAVAILABLE state, schedule an idle
handler to transition to DISCONNECTED if the device isn't rfkilled
* src/nm-device-802-3-ethernet.c
- (set_carrier): move above callers and get rid of prototype
- (device_state_changed): when entering UNAVAILABLE state, schedule an
idle handler to transition to DISCONNECTED if the device has a
carrier
- (real_update_hw_address): clean up
- (link_timeout_cb, ppp_state_changed): change state instead of calling
deactivation directly as deactivation doesn't change state anymore
* src/NetworkManagerPolicy.c
- (schedule_activate_check): yay, remove wireless_enabled hack since
the NMManager and wireless devices work that out themselves now
- (device_state_changed): change to a switch and update for new device
states
- (device_carrier_changed): remove; device handles this now through
state changes
- (device_added): don't care about carrier any more; the initial
activation check will happen when the device transitions to
DISCONNECTED
* src/nm-manager.c
- (dispose): clear unmanaged devices
- (handle_unmanaged_devices): update unmanaged device list and toggle
the managed property on each device when needed
- (system_settings_properties_changed_cb): handle signals from the
system settings service
- (system_settings_get_unmanaged_devices_cb): handle callback from
getting the unmanaged device list method call
- (query_unmanaged_devices): ask the system settings service for its
list of unmanaged devices
- (nm_manager_name_owner_changed, initial_get_connections): get unmanaged
devices
- (manager_set_wireless_enabled): push rfkill state down to wireless
devices directly and let them handle the necessary state transitions
- (manager_device_state_changed): update for new device states
- (nm_manager_add_device): set initial rfkill state on wireless devices
- (nm_manager_remove_device): don't touch the device if it's unmanaged
- (nm_manager_activate_connection): return error if the device is
unmanaged
- (nm_manager_sleep): handle new device states correctly; don't change
the state of unavailable/unmanaged devices
* libnm-glib/nm-device-802-11-wireless.c
- (state_changed_cb): update for new device states
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3540 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-08 02:58:02 +00:00
|
|
|
}
|
2012-02-26 17:27:42 -06:00
|
|
|
|
2017-01-25 13:27:16 +01:00
|
|
|
		/* Now that we're activating a slave for that master, make sure the master doesn't
		 * just decide to go unmanaged while we're activating (perhaps because other slaves
		 * go away, leaving it with no kids).
		 */
		if (master_device) {
			nm_device_set_unmanaged_by_flags (master_device, NM_UNMANAGED_EXTERNAL_DOWN,
			                                  NM_UNMAN_FLAG_OP_FORGET, NM_DEVICE_STATE_REASON_USER_REQUESTED);
		}

		nm_active_connection_set_master (active, master_ac);
		_LOGD (LOGD_CORE, "Activation of '%s' depends on active connection %p %s",
		       nm_settings_connection_get_id (sett_conn),
		       master_ac,
		       nm_dbus_object_get_path (NM_DBUS_OBJECT (master_ac)) ?: "");
	}

	/* Check slaves for master connection and possibly activate them */
	autoconnect_slaves (self, sett_conn, device, nm_active_connection_get_subject (active));

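	/* Honor the profile's multi-connect setting: a profile that allows multiple
	 * simultaneous activations proceeds right away; otherwise, any existing active
	 * or queued activation of the same profile is disconnected first. */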
	multi_connect = _nm_connection_get_multi_connect (nm_settings_connection_get_connection (sett_conn));
	if (   multi_connect == NM_CONNECTION_MULTI_CONNECT_MULTIPLE
	    || (   multi_connect == NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE
	        && NM_IN_SET (nm_active_connection_get_activation_reason (active),
	                      NM_ACTIVATION_REASON_ASSUME,
	                      NM_ACTIVATION_REASON_AUTOCONNECT_SLAVES,
	                      NM_ACTIVATION_REASON_USER_REQUEST))) {
		/* the profile can be activated multiple times. Proceed. */
	} else {
		gs_unref_ptrarray GPtrArray *all_ac_arr = NULL;
		NMActiveConnection *ac;
		guint i, n_all;

		/* Disconnect the connection if already connected or queued for activation.
		 * The connection cannot be active multiple times (at the same time). */
		ac = active_connection_find (self, sett_conn, NULL, NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
		                             &all_ac_arr);
		if (ac) {
			n_all = all_ac_arr ? all_ac_arr->len : ((guint) 1);
			for (i = 0; i < n_all; i++) {
				nm_device_disconnect_active_connection (  all_ac_arr
				                                        ? all_ac_arr->pdata[i]
				                                        : ac,
				                                        NM_DEVICE_STATE_REASON_NEW_ACTIVATION,
				                                        NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN);
			}
		}
	}

	/* If the device is there, we can ready it for the activation. */
	if (nm_device_is_real (device)) {
		unmanaged_to_disconnected (device);

		if (!nm_device_get_managed (device, FALSE)) {
			/* Unexpectedly, the device is still unmanaged. That can happen for example,
			 * if the device is forcibly unmanaged due to NM_UNMANAGED_USER_SETTINGS. */
			g_set_error_literal (error,
			                     NM_MANAGER_ERROR,
			                     NM_MANAGER_ERROR_DEPENDENCY_FAILED,
			                     "Activation failed because the device is unmanaged");
			return FALSE;
		}
	}

	/* Export the new ActiveConnection to clients and start it on the device */
	active_connection_add (self, active);
	nm_device_queue_activation (device, NM_ACT_REQUEST (active));
	return TRUE;
}

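/* _internal_activate_generic:
 *
 * Common entry point for both device and VPN activations: re-check that the
 * active connection has not already started deactivating, dispatch to the VPN
 * or device specific activation path, and on success refresh the manager's
 * activating-connection property now that the active connection is exported
 * on D-Bus.
 */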
static gboolean
_internal_activate_generic (NMManager *self, NMActiveConnection *active, GError **error)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	gboolean success = FALSE;

	/* Ensure the activation request is still valid, e.g. that its device hasn't
	 * gone away and that no other dependency has failed.
	 */
	if (nm_active_connection_get_state (active) >= NM_ACTIVE_CONNECTION_STATE_DEACTIVATING) {
		g_set_error_literal (error,
		                     NM_MANAGER_ERROR,
		                     NM_MANAGER_ERROR_DEPENDENCY_FAILED,
		                     "Activation failed because dependencies failed.");
		return FALSE;
	}

	if (NM_IS_VPN_CONNECTION (active))
		success = _internal_activate_vpn (self, active, error);
	else
		success = _internal_activate_device (self, active, error);

	if (success) {
		/* Force an update of the Manager's activating-connection property.
		 * The device changes state before the AC gets exported, which causes
		 * the manager's 'activating-connection' property to be NULL since the
		 * AC only gets a D-Bus path when it's exported. So now that the AC
		 * is exported, make sure the manager's activating-connection property
		 * is up-to-date.
		 */
		policy_activating_ac_changed (G_OBJECT (priv->policy), NULL, self);
	}

	return success;
}

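/* _new_active_connection:
 *
 * Create the #NMActiveConnection instance for an activation request. For VPNs
 * this resolves the parent (base) active connection and returns a new
 * #NMVpnConnection; for everything else it returns a new #NMActRequest bound
 * to @device.
 */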
static NMActiveConnection *
_new_active_connection (NMManager *self,
                        gboolean is_vpn,
                        NMSettingsConnection *sett_conn,
                        NMConnection *incompl_conn,
                        NMConnection *applied,
                        const char *specific_object,
                        NMDevice *device,
                        NMAuthSubject *subject,
                        NMActivationType activation_type,
                        NMActivationReason activation_reason,
                        GError **error)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMDevice *parent_device;

	nm_assert (!sett_conn || NM_IS_SETTINGS_CONNECTION (sett_conn));
	nm_assert (!incompl_conn || NM_IS_CONNECTION (incompl_conn));
	nm_assert ((!incompl_conn) ^ (!sett_conn));
	nm_assert (NM_IS_AUTH_SUBJECT (subject));
	nm_assert (is_vpn == _connection_is_vpn (sett_conn
	                                         ? nm_settings_connection_get_connection (sett_conn)
	                                         : incompl_conn));
	nm_assert (is_vpn || NM_IS_DEVICE (device));
	nm_assert (!nm_streq0 (specific_object, "/"));
	nm_assert (!applied || NM_IS_CONNECTION (applied));
	nm_assert (!is_vpn || !applied);

	if (is_vpn) {
		NMActiveConnection *parent;

		/* FIXME: for VPN connections, we don't allow re-activating an
		 * already active connection. It's a bug, and should be fixed together
		 * when reworking VPN handling. */
		if (active_connection_find_by_connection (self,
		                                          sett_conn,
		                                          incompl_conn,
		                                          NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
		                                          NULL)) {
			g_set_error (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_CONNECTION_ALREADY_ACTIVE,
			             "Connection '%s' is already active",
			             sett_conn ? nm_settings_connection_get_id (sett_conn) : nm_connection_get_id (incompl_conn));
			return NULL;
		}

		if (activation_type != NM_ACTIVATION_TYPE_MANAGED)
			g_return_val_if_reached (NULL);

		if (specific_object) {
			/* Find the specific connection the client requested we use */
			parent = active_connection_get_by_path (self, specific_object);
			if (!parent) {
				g_set_error_literal (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
				                     "Base connection for VPN connection not active.");
				return NULL;
			}
		} else
			parent = priv->primary_connection;

		if (!parent) {
			g_set_error_literal (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
			                     "Could not find source connection.");
			return NULL;
		}

		parent_device = nm_active_connection_get_device (parent);
		if (!parent_device) {
			g_set_error_literal (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_UNKNOWN_DEVICE,
			                     "Source connection had no active device");
			return NULL;
		}

		if (device && device != parent_device) {
			g_set_error_literal (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_UNKNOWN_DEVICE,
			                     "The device doesn't match the active connection.");
			return NULL;
		}

		return (NMActiveConnection *) nm_vpn_connection_new (sett_conn,
		                                                     parent_device,
		                                                     nm_dbus_object_get_path (NM_DBUS_OBJECT (parent)),
		                                                     activation_reason,
		                                                     subject);
	}

	return (NMActiveConnection *) nm_act_request_new (sett_conn,
	                                                  applied,
	                                                  specific_object,
	                                                  subject,
	                                                  activation_type,
	                                                  activation_reason,
	                                                  device);
}

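/* _internal_activation_auth_done:
 *
 * Completion handler that runs once the activation request has been authorized.
 * If authorization failed, or if a conflicting activation for the same profile
 * and device already exists for external/assumed/autoconnect requests, the
 * active connection is marked as failed; otherwise the activation proceeds via
 * _internal_activate_generic().
 */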
static void
_internal_activation_auth_done (NMManager *self,
                                NMActiveConnection *active,
                                gboolean success,
                                const char *error_desc)
{
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMActiveConnection *ac;
	gs_free_error GError *error = NULL;

	nm_assert (NM_IS_ACTIVE_CONNECTION (active));

	if (!success)
		goto fail;

	/* Don't continue with an autoconnect-activation if a more important activation
	 * already exists.
	 * We also check this earlier, but there we may fail to detect a duplicate
	 * if the existing active connection was undergoing authorization.
	 */
	if (NM_IN_SET (nm_active_connection_get_activation_reason (active), NM_ACTIVATION_REASON_EXTERNAL,
	                                                                    NM_ACTIVATION_REASON_ASSUME,
	                                                                    NM_ACTIVATION_REASON_AUTOCONNECT)) {
		c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
			if (   nm_active_connection_get_device (ac) == nm_active_connection_get_device (active)
			    && nm_active_connection_get_settings_connection (ac) == nm_active_connection_get_settings_connection (active)
			    && NM_IN_SET (nm_active_connection_get_state (ac),
			                  NM_ACTIVE_CONNECTION_STATE_ACTIVATING,
			                  NM_ACTIVE_CONNECTION_STATE_ACTIVATED)) {
				g_set_error (&error,
				             NM_MANAGER_ERROR,
				             NM_MANAGER_ERROR_CONNECTION_ALREADY_ACTIVE,
				             "Connection '%s' is already active",
				             nm_active_connection_get_settings_connection_id (active));
				goto fail;
			}
		}
	}

	if (_internal_activate_generic (self, active, &error))
		return;

fail:
	nm_assert (error_desc || error);
	nm_active_connection_set_state_fail (active,
	                                     NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN,
	                                     error_desc ?: error->message);
}

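/* For context, this is roughly how a client reaches the activation entry point
 * documented below: it uses libnm's public API, which calls the D-Bus method
 * ActivateConnection on org.freedesktop.NetworkManager; the manager then builds
 * and authorizes an NMActiveConnection using the helpers above and
 * nm_manager_activate_connection() below. The sketch is illustrative only, is
 * not part of this file, keeps error handling minimal, and the callback name
 * on_activated() is made up:
 *
 *     static void
 *     on_activated (GObject *source, GAsyncResult *result, gpointer user_data)
 *     {
 *         GError *error = NULL;
 *         NMActiveConnection *ac;
 *
 *         ac = nm_client_activate_connection_finish (NM_CLIENT (source), result, &error);
 *         if (!ac) {
 *             g_printerr ("activation failed: %s\n", error->message);
 *             g_clear_error (&error);
 *             return;
 *         }
 *         g_object_unref (ac);
 *     }
 *
 *     NMClient *client = nm_client_new (NULL, NULL);
 *     const GPtrArray *conns = nm_client_get_connections (client);
 *     NMConnection *connection = conns->len > 0 ? NM_CONNECTION (conns->pdata[0]) : NULL;
 *
 *     // NULL device and specific-object let NetworkManager choose them.
 *     nm_client_activate_connection_async (client, connection, NULL, NULL,
 *                                          NULL, on_activated, NULL);
 */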
/**
|
|
|
|
|
* nm_manager_activate_connection():
|
|
|
|
|
* @self: the #NMManager
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
* @sett_conn: the #NMSettingsConnection to activate on @device
|
2016-09-07 17:47:26 +02:00
|
|
|
* @applied: (allow-none): the applied connection to activate on @device
|
2014-02-26 16:04:45 -06:00
|
|
|
* @specific_object: the specific object path, if any, for the activation
|
2018-08-11 11:08:17 +02:00
|
|
|
* @device: the #NMDevice to activate @sett_conn on. Can be %NULL for VPNs.
|
2014-02-26 16:04:45 -06:00
|
|
|
* @subject: the subject which requested activation
|
2017-03-07 11:04:36 +01:00
|
|
|
* @activation_type: whether to assume the connection. That is, take over gracefully,
|
|
|
|
|
 * non-destructively.
|
2018-03-28 17:18:04 +02:00
|
|
|
* @activation_reason: the reason for activation
|
2014-02-26 16:04:45 -06:00
|
|
|
* @error: return location for an error
|
|
|
|
|
*
|
2018-08-11 11:08:17 +02:00
|
|
|
* Begins a new internally-initiated activation of @sett_conn on @device.
|
2014-02-26 16:04:45 -06:00
|
|
|
* @subject should be the subject of the activation that triggered this
|
|
|
|
|
* one, or if this is an autoconnect request, a new internal subject.
|
|
|
|
|
* The returned #NMActiveConnection is owned by the Manager and should be
|
2016-09-07 17:47:26 +02:00
|
|
|
* referenced by the caller if the caller continues to use it. If @applied
|
|
|
|
|
* is supplied, it shall not be modified by the caller afterwards.
|
2014-02-26 16:04:45 -06:00
|
|
|
*
|
|
|
|
|
* Returns: (transfer none): the new #NMActiveConnection that tracks
|
2018-08-11 11:08:17 +02:00
|
|
|
* activation of @sett_conn on @device
|
2014-02-26 16:04:45 -06:00
|
|
|
*/
|
2013-08-28 16:19:20 -05:00
|
|
|
NMActiveConnection *
|
|
|
|
|
nm_manager_activate_connection (NMManager *self,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2016-09-07 17:47:26 +02:00
|
|
|
NMConnection *applied,
|
2013-08-28 16:19:20 -05:00
|
|
|
const char *specific_object,
|
|
|
|
|
NMDevice *device,
|
|
|
|
|
NMAuthSubject *subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NMActivationType activation_type,
|
2018-03-28 17:18:04 +02:00
|
|
|
NMActivationReason activation_reason,
|
2013-08-28 16:19:20 -05:00
|
|
|
GError **error)
|
|
|
|
|
{
|
2018-04-12 13:37:40 +02:00
|
|
|
NMManagerPrivate *priv;
|
2013-08-28 16:19:20 -05:00
|
|
|
NMActiveConnection *active;
|
2018-04-18 09:06:54 +02:00
|
|
|
AsyncOpData *async_op_data;
|
2018-04-30 09:50:04 +02:00
|
|
|
gboolean is_vpn;
|
2013-08-28 16:19:20 -05:00
|
|
|
|
2018-04-12 13:37:40 +02:00
|
|
|
g_return_val_if_fail (NM_IS_MANAGER (self), NULL);
|
2018-08-11 11:08:17 +02:00
|
|
|
g_return_val_if_fail (NM_IS_SETTINGS_CONNECTION (sett_conn), NULL);
|
|
|
|
|
is_vpn = _connection_is_vpn (nm_settings_connection_get_connection (sett_conn));
|
2018-04-30 09:50:04 +02:00
|
|
|
g_return_val_if_fail (is_vpn || NM_IS_DEVICE (device), NULL);
|
2018-04-12 09:48:16 +02:00
|
|
|
g_return_val_if_fail (!error || !*error, NULL);
|
2018-04-12 14:02:39 +02:00
|
|
|
nm_assert (!nm_streq0 (specific_object, "/"));
|
2013-08-28 16:19:20 -05:00
|
|
|
|
2018-04-12 13:37:40 +02:00
|
|
|
priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_auth_is_subject_in_acl_set_error (nm_settings_connection_get_connection (sett_conn),
|
2018-04-12 09:48:16 +02:00
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
error))
|
2014-01-21 13:41:18 -06:00
|
|
|
return NULL;
|
|
|
|
|
|
2015-06-23 20:52:04 +02:00
|
|
|
/* Look for an active connection that's equivalent and is already pending authorization
|
|
|
|
|
* and eventual activation. This is used to de-duplicate concurrent activations which would
|
|
|
|
|
* otherwise race and cause the device to disconnect and reconnect repeatedly.
|
|
|
|
|
* In particular, this allows the master and multiple slaves to concurrently auto-activate
|
|
|
|
|
* while all the slaves would use the same active-connection. */
|
2018-04-18 09:06:54 +02:00
|
|
|
c_list_for_each_entry (async_op_data, &priv->async_op_lst_head, async_op_lst) {
|
2015-06-23 20:52:04 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
if (async_op_data->async_op_type != ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
active = async_op_data->ac_auth.active;
|
2018-08-11 11:08:17 +02:00
|
|
|
if ( sett_conn == nm_active_connection_get_settings_connection (active)
|
2018-04-12 14:02:39 +02:00
|
|
|
&& nm_streq0 (nm_active_connection_get_specific_object (active), specific_object)
|
2018-04-30 09:50:04 +02:00
|
|
|
&& (!device || nm_active_connection_get_device (active) == device)
|
2015-06-23 20:52:04 +02:00
|
|
|
&& nm_auth_subject_is_internal (nm_active_connection_get_subject (active))
|
2018-04-12 15:47:40 +02:00
|
|
|
&& nm_auth_subject_is_internal (subject)
|
|
|
|
|
&& nm_active_connection_get_activation_reason (active) == activation_reason)
|
2015-06-23 20:52:04 +02:00
|
|
|
return active;
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
active = _new_active_connection (self,
|
2018-04-30 09:50:04 +02:00
|
|
|
is_vpn,
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn,
|
|
|
|
|
NULL,
|
2016-09-07 17:47:26 +02:00
|
|
|
applied,
|
2013-08-28 16:19:20 -05:00
|
|
|
specific_object,
|
|
|
|
|
device,
|
|
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
activation_type,
|
2018-03-28 17:18:04 +02:00
|
|
|
activation_reason,
|
2013-08-28 16:19:20 -05:00
|
|
|
error);
|
2018-04-12 15:40:32 +02:00
|
|
|
if (!active)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
nm_active_connection_authorize (active,
|
|
|
|
|
NULL,
|
|
|
|
|
_async_op_complete_ac_auth_cb,
|
|
|
|
|
_async_op_data_new_authorize_activate_internal (self,
|
|
|
|
|
active));
|
2013-08-28 16:19:20 -05:00
|
|
|
return active;
|
2007-10-01 15:38:39 +00:00
|
|
|
}
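
/*
 * Illustrative sketch only (not part of the original source): one way an
 * internal, autoconnect-style caller might use nm_manager_activate_connection()
 * as documented above. The helper name, and how @sett_conn and @device are
 * obtained, are assumptions made for the example.
 */
static NMActiveConnection *
_example_internal_activate (NMManager *self,
                            NMSettingsConnection *sett_conn,
                            NMDevice *device)
{
	gs_unref_object NMAuthSubject *subject = NULL;
	gs_free_error GError *error = NULL;
	NMActiveConnection *active;

	/* internal requests (such as autoconnect) use an internal subject */
	subject = nm_auth_subject_new_internal ();

	active = nm_manager_activate_connection (self,
	                                         sett_conn,
	                                         NULL,               /* applied: none, use the settings profile */
	                                         NULL,               /* specific_object */
	                                         device,
	                                         subject,
	                                         NM_ACTIVATION_TYPE_MANAGED,
	                                         NM_ACTIVATION_REASON_AUTOCONNECT,
	                                         &error);
	if (!active)
		return NULL;

	/* the active connection is owned by the manager (transfer none);
	 * take a reference if it is kept beyond this call. */
	return g_object_ref (active);
}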
|
|
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/**
|
|
|
|
|
* validate_activation_request:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @context: the D-Bus context of the requestor
|
2018-08-11 11:08:17 +02:00
|
|
|
* @sett_conn: the #NMSettingsConnection to be activated, or %NULL if there
|
|
|
|
|
* is only a partial activation.
|
|
|
|
|
* @connection: the partial #NMConnection to be activated (if @sett_conn is unspecified)
|
2018-04-11 17:05:00 +02:00
|
|
|
 * @device_path: the object path of the device to be activated, or %NULL
|
2018-09-15 07:20:54 -04:00
|
|
|
* @out_device: on successful return, the #NMDevice to be activated with @connection
|
2018-04-12 10:34:11 +02:00
|
|
|
* The caller may pass in a device which shortcuts the lookup by path.
|
|
|
|
|
 * In this case, the passed-in device must have the matching @device_path
|
|
|
|
|
* already.
|
2018-04-12 11:15:42 +02:00
|
|
|
* @out_is_vpn: on successful return, %TRUE if @connection is a VPN connection
|
2014-10-15 21:17:45 -05:00
|
|
|
* @error: location to store an error on failure
|
|
|
|
|
*
|
|
|
|
|
* Performs basic validation on an activation request, including ensuring that
|
|
|
|
|
 * the requestor is a valid Unix process, is not disallowed by @connection
|
|
|
|
|
* permissions, and that a device exists that can activate @connection.
|
|
|
|
|
*
|
|
|
|
|
* Returns: on success, the #NMAuthSubject representing the requestor, or
|
|
|
|
|
* %NULL on error
|
|
|
|
|
*/
|
2013-07-29 12:42:16 -05:00
|
|
|
static NMAuthSubject *
|
2012-09-13 13:17:46 -05:00
|
|
|
validate_activation_request (NMManager *self,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2012-09-13 13:17:46 -05:00
|
|
|
NMConnection *connection,
|
|
|
|
|
const char *device_path,
|
|
|
|
|
NMDevice **out_device,
|
2018-04-12 11:15:42 +02:00
|
|
|
gboolean *out_is_vpn,
|
2012-09-13 13:17:46 -05:00
|
|
|
GError **error)
|
|
|
|
|
{
|
|
|
|
|
NMDevice *device = NULL;
|
2018-04-12 11:15:42 +02:00
|
|
|
gboolean is_vpn = FALSE;
|
2018-04-19 11:52:19 +02:00
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
2012-09-13 13:17:46 -05:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_assert (!sett_conn || NM_IS_SETTINGS_CONNECTION (sett_conn));
|
|
|
|
|
nm_assert (!connection || NM_IS_CONNECTION (connection));
|
|
|
|
|
nm_assert (sett_conn || connection);
|
|
|
|
|
nm_assert (!connection || !sett_conn || connection == nm_settings_connection_get_connection (sett_conn));
|
2018-04-11 17:35:23 +02:00
|
|
|
nm_assert (out_device);
|
2018-04-12 11:15:42 +02:00
|
|
|
nm_assert (out_is_vpn);
|
2012-09-13 13:17:46 -05:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
if (!connection)
|
|
|
|
|
connection = nm_settings_connection_get_connection (sett_conn);
|
|
|
|
|
|
2013-07-29 12:42:16 -05:00
|
|
|
/* Validate the caller */
|
2014-08-14 13:34:57 +02:00
|
|
|
subject = nm_auth_subject_new_unix_process_from_context (context);
|
2013-07-29 12:42:16 -05:00
|
|
|
if (!subject) {
|
2012-09-13 16:51:58 -05:00
|
|
|
g_set_error_literal (error,
|
2013-07-29 12:42:16 -05:00
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2012-09-13 16:51:58 -05:00
|
|
|
"Failed to get request UID.");
|
2013-07-29 12:42:16 -05:00
|
|
|
return NULL;
|
2012-09-13 16:51:58 -05:00
|
|
|
}
|
|
|
|
|
|
2018-04-12 09:48:16 +02:00
|
|
|
if (!nm_auth_is_subject_in_acl_set_error (connection,
|
|
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
error))
|
2018-04-12 10:34:11 +02:00
|
|
|
return NULL;
|
2014-01-17 11:18:23 -06:00
|
|
|
|
2018-04-12 11:19:06 +02:00
|
|
|
is_vpn = _connection_is_vpn (connection);
|
2012-09-13 13:17:46 -05:00
|
|
|
|
2018-04-12 10:34:11 +02:00
|
|
|
if (*out_device) {
|
|
|
|
|
device = *out_device;
|
|
|
|
|
nm_assert (NM_IS_DEVICE (device));
|
|
|
|
|
nm_assert (device_path);
|
|
|
|
|
nm_assert (nm_streq0 (device_path, nm_dbus_object_get_path (NM_DBUS_OBJECT (device))));
|
|
|
|
|
nm_assert (device == nm_manager_get_device_by_path (self, device_path));
|
|
|
|
|
} else if (device_path) {
|
2012-09-13 13:17:46 -05:00
|
|
|
device = nm_manager_get_device_by_path (self, device_path);
|
2013-08-28 16:19:20 -05:00
|
|
|
if (!device) {
|
|
|
|
|
g_set_error_literal (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"Device not found");
|
2018-04-12 10:34:11 +02:00
|
|
|
return NULL;
|
2013-08-28 16:19:20 -05:00
|
|
|
}
|
2018-04-12 13:37:40 +02:00
|
|
|
} else if (!is_vpn) {
|
2018-07-10 11:17:05 +02:00
|
|
|
gs_free_error GError *local = NULL;
|
|
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
device = nm_manager_get_best_device_for_connection (self, sett_conn, connection, TRUE, NULL, &local);
|
2018-04-12 13:37:40 +02:00
|
|
|
if (!device) {
|
2018-04-12 10:34:11 +02:00
|
|
|
gs_free char *iface = NULL;
|
|
|
|
|
|
|
|
|
|
/* VPN and software-device connections don't need a device yet,
|
|
|
|
|
* but non-virtual connections do ... */
|
|
|
|
|
if (!nm_connection_is_virtual (connection)) {
|
2018-07-10 11:17:05 +02:00
|
|
|
g_set_error (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"No suitable device found for this connection (%s).",
|
|
|
|
|
local->message);
|
2018-04-12 10:34:11 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
2015-02-03 16:15:37 +01:00
|
|
|
|
2018-04-12 10:34:11 +02:00
|
|
|
/* Look for an existing device with the connection's interface name */
|
|
|
|
|
iface = nm_manager_get_connection_iface (self, connection, NULL, error);
|
|
|
|
|
if (!iface)
|
|
|
|
|
return NULL;
|
2012-09-13 13:17:46 -05:00
|
|
|
|
2018-04-12 10:34:11 +02:00
|
|
|
device = find_device_by_iface (self, iface, connection, NULL);
|
|
|
|
|
if (!device) {
|
|
|
|
|
g_set_error_literal (error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"Failed to find a compatible device for this connection");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2013-08-28 16:19:20 -05:00
|
|
|
}
|
2014-10-15 21:17:45 -05:00
|
|
|
}
|
|
|
|
|
|
2018-07-06 15:54:16 +02:00
|
|
|
nm_assert (is_vpn || NM_IS_DEVICE (device));
|
2018-04-12 13:37:40 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
*out_device = device;
|
2018-04-12 11:15:42 +02:00
|
|
|
*out_is_vpn = is_vpn;
|
2018-04-12 10:34:11 +02:00
|
|
|
return g_steal_pointer (&subject);
|
2012-09-13 13:17:46 -05:00
|
|
|
}
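
/*
 * Illustrative sketch only (not part of the original source): how a D-Bus
 * method handler might call validate_activation_request() described above.
 * The handler name and the way @sett_conn and @device_path arrive are
 * assumptions made for the example.
 */
static void
_example_check_activation_request (NMManager *self,
                                   GDBusMethodInvocation *context,
                                   NMSettingsConnection *sett_conn,
                                   const char *device_path)
{
	gs_unref_object NMAuthSubject *subject = NULL;
	NMDevice *device = NULL;
	gboolean is_vpn = FALSE;
	GError *error = NULL;

	subject = validate_activation_request (self,
	                                       context,
	                                       sett_conn,
	                                       NULL,          /* connection: taken from @sett_conn */
	                                       device_path,
	                                       &device,
	                                       &is_vpn,
	                                       &error);
	if (!subject) {
		g_dbus_method_invocation_take_error (context, error);
		return;
	}

	/* on success, continue by creating and authorizing an
	 * NMActiveConnection for @device (or for a VPN, if @is_vpn) */
}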
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2012-09-13 16:51:58 -05:00
|
|
|
|
2010-06-03 23:20:11 -07:00
|
|
|
static void
|
2018-04-18 09:06:54 +02:00
|
|
|
_activation_auth_done (NMManager *self,
|
|
|
|
|
NMActiveConnection *active,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
2014-02-26 16:04:45 -06:00
|
|
|
gboolean success,
|
2018-04-18 09:06:54 +02:00
|
|
|
const char *error_desc)
|
2013-08-28 16:19:20 -05:00
|
|
|
{
|
|
|
|
|
GError *error = NULL;
|
2015-07-14 10:26:54 +02:00
|
|
|
NMAuthSubject *subject;
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *connection;
|
2018-04-17 14:59:27 +02:00
|
|
|
|
2015-07-14 10:26:54 +02:00
|
|
|
subject = nm_active_connection_get_subject (active);
|
2015-07-14 16:53:24 +02:00
|
|
|
connection = nm_active_connection_get_settings_connection (active);
|
2012-09-13 16:51:58 -05:00
|
|
|
|
2018-04-12 15:40:32 +02:00
|
|
|
if (!success) {
|
2013-08-28 16:19:20 -05:00
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
2016-01-12 09:46:28 +01:00
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
error_desc);
|
2018-04-12 15:40:32 +02:00
|
|
|
goto fail;
|
2012-09-13 16:51:58 -05:00
|
|
|
}
|
|
|
|
|
|
2018-04-12 15:40:32 +02:00
|
|
|
if (!_internal_activate_generic (self, active, &error))
|
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
|
|
nm_settings_connection_autoconnect_blocked_reason_set (connection,
|
|
|
|
|
NM_SETTINGS_AUTO_CONNECT_BLOCKED_REASON_USER_REQUEST,
|
|
|
|
|
FALSE);
|
2018-04-18 09:06:54 +02:00
|
|
|
g_dbus_method_invocation_return_value (invocation,
|
2018-04-12 15:40:32 +02:00
|
|
|
g_variant_new ("(o)",
|
|
|
|
|
nm_dbus_object_get_path (NM_DBUS_OBJECT (active))));
|
|
|
|
|
nm_audit_log_connection_op (NM_AUDIT_OP_CONN_ACTIVATE, connection, TRUE, NULL,
|
|
|
|
|
subject, NULL);
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
fail:
|
2016-04-20 12:10:55 +02:00
|
|
|
nm_audit_log_connection_op (NM_AUDIT_OP_CONN_ACTIVATE, connection, FALSE, NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
subject, error->message);
|
2018-02-05 16:41:10 +01:00
|
|
|
nm_active_connection_set_state_fail (active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN,
|
|
|
|
|
error->message);
|
2015-07-14 10:26:54 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
g_dbus_method_invocation_take_error (invocation, error);
|
2008-11-21 18:11:15 +00:00
|
|
|
}
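
/*
 * Illustrative sketch only (not part of the original source): the D-Bus rework
 * note further below mentions that PropertiesChanged signals are now emitted
 * without delay, and that related property changes can still be grouped with
 * the standard GObject freeze/thaw mechanism. A minimal example of such
 * grouping, assuming a caller that updates several properties at once:
 */
static void
_example_group_property_changes (GObject *exported_object)
{
	g_object_freeze_notify (exported_object);

	/* ... update several properties here; their "notify" emissions
	 * (and hence the resulting PropertiesChanged signals) are queued ... */

	/* queued notifications are emitted when the object is thawed */
	g_object_thaw_notify (exported_object);
}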
|
|
|
|
|
|
2007-10-01 15:38:39 +00:00
|
|
|
static void
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower-layer GDBusConnection API
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example by polling whether the
system bus appears (as was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering; the generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add that, if it turns out to be
useful.
Anyway, without a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't show a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
impl_manager_activate_connection (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *dbus_connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (obj);
|
2012-09-13 13:17:46 -05:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2018-02-05 14:51:27 +01:00
|
|
|
gs_unref_object NMActiveConnection *active = NULL;
|
|
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
2018-08-11 11:08:17 +02:00
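As a minimal sketch of the delegation described above (placeholder types and names, not the actual NetworkManager implementation): the settings-side object owns a plain connection object and hands it out through an accessor, instead of being that object via inheritance.

/* illustrative only; "Connection" stands in for NMConnection/NMSimpleConnection */
typedef struct Connection Connection;

typedef struct {
    Connection *conn;   /* owned; the actual profile settings live here */
    /* D-Bus export and profile-tracking state belongs here, not in Connection */
} SettingsConnection;

/* callers that only need the profile ask for the delegate explicitly,
 * rather than casting SettingsConnection to Connection */
static Connection *
settings_connection_get_connection (SettingsConnection *self)
{
    return self->conn;
}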
    NMSettingsConnection *sett_conn = NULL;
    NMDevice *device = NULL;
    gboolean is_vpn = FALSE;
    GError *error = NULL;
    const char *connection_path;
    const char *device_path;
    const char *specific_object_path;

    g_variant_get (parameters, "(&o&o&o)", &connection_path, &device_path, &specific_object_path);

    connection_path = nm_utils_dbus_normalize_object_path (connection_path);
    specific_object_path = nm_utils_dbus_normalize_object_path (specific_object_path);
    device_path = nm_utils_dbus_normalize_object_path (device_path);

    /* If the connection path is given and valid, that connection is activated.
     * Otherwise the "best" connection for the device is chosen and activated,
     * regardless of whether that connection is autoconnect-enabled or not
     * (since this is an explicit request, not an auto-activation request).
     */
    if (connection_path) {
        sett_conn = nm_settings_get_connection_by_path (priv->settings, connection_path);
        if (!sett_conn) {
            error = g_error_new_literal (NM_MANAGER_ERROR,
                                         NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
                                         "Connection could not be found.");
            goto error;
        }
    } else {
        /* If no connection is given, find a suitable connection for the given device path */
        if (!device_path) {
            error = g_error_new_literal (NM_MANAGER_ERROR, NM_MANAGER_ERROR_UNKNOWN_DEVICE,
                                         "Only devices may be activated without specifying a connection");
            goto error;
        }
        device = nm_manager_get_device_by_path (self, device_path);
        if (!device) {
            error = g_error_new (NM_MANAGER_ERROR, NM_MANAGER_ERROR_UNKNOWN_DEVICE,
                                 "Cannot activate an unknown device '%s'", device_path);
            goto error;
        }
        sett_conn = nm_device_get_best_connection (device, specific_object_path, &error);
        if (!sett_conn)
            goto error;
    }

    subject = validate_activation_request (self,
                                           invocation,
                                           sett_conn,
                                           NULL,
                                           device_path,
                                           &device,
                                           &is_vpn,
                                           &error);
    if (!subject)
        goto error;

    active = _new_active_connection (self,
                                     is_vpn,
                                     sett_conn,
                                     NULL,
                                     NULL,
                                     specific_object_path,
                                     device,
                                     subject,
                                     NM_ACTIVATION_TYPE_MANAGED,
                                     NM_ACTIVATION_REASON_USER_REQUEST,
                                     &error);
    if (!active)
        goto error;

    nm_active_connection_authorize (active,
                                    NULL,
                                    _async_op_complete_ac_auth_cb,
                                    _async_op_data_new_ac_auth_activate_user (self,
                                                                              active,
                                                                              invocation));

    /* we passed the pointer on to _async_op_data_new_ac_auth_activate_user() */
    g_steal_pointer (&active);

    return;

error:
    if (sett_conn) {
        nm_audit_log_connection_op (NM_AUDIT_OP_CONN_ACTIVATE, sett_conn, FALSE, NULL,
                                    subject, error->message);
    }
    g_dbus_method_invocation_take_error (invocation, error);
}
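For reference, a hedged client-side sketch of calling the ActivateConnection D-Bus method that the handler above implements. The connection and device object paths are placeholders; "/" is the NetworkManager convention for "no specific object", which the normalization calls above take care of.

#include <gio/gio.h>

int
main (void)
{
    g_autoptr(GDBusConnection) bus = NULL;
    g_autoptr(GVariant) ret = NULL;
    g_autoptr(GError) error = NULL;
    const char *active_path;

    bus = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, &error);
    if (!bus)
        g_error ("failed to connect to the system bus: %s", error->message);

    /* ActivateConnection(o connection, o device, o specific_object) -> (o active_connection) */
    ret = g_dbus_connection_call_sync (bus,
                                       "org.freedesktop.NetworkManager",
                                       "/org/freedesktop/NetworkManager",
                                       "org.freedesktop.NetworkManager",
                                       "ActivateConnection",
                                       g_variant_new ("(ooo)",
                                                      "/org/freedesktop/NetworkManager/Settings/1",
                                                      "/org/freedesktop/NetworkManager/Devices/2",
                                                      "/"),
                                       G_VARIANT_TYPE ("(o)"),
                                       G_DBUS_CALL_FLAGS_NONE,
                                       -1,
                                       NULL,
                                       &error);
    if (!ret)
        g_error ("ActivateConnection failed: %s", error->message);

    g_variant_get (ret, "(&o)", &active_path);
    g_print ("active connection: %s\n", active_path);
    return 0;
}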

/*****************************************************************************/

static void
activation_add_done (NMSettings *settings,
                     NMSettingsConnection *new_connection,
                     GError *error,
                     GDBusMethodInvocation *context,
                     NMAuthSubject *subject,
                     gpointer user_data)
{
    NMManager *self;
    gs_unref_object NMActiveConnection *active = NULL;
    gs_free_error GError *local = NULL;
    gpointer persist_ptr;
    NMSettingsConnectionPersistMode persist;

    nm_utils_user_data_unpack (user_data, &self, &active, &persist_ptr);
    persist = GPOINTER_TO_INT (persist_ptr);

    if (!error) {
        nm_active_connection_set_settings_connection (active, new_connection);

        if (_internal_activate_generic (self, active, &local)) {
            nm_settings_connection_update (new_connection,
                                           NULL,
                                           persist,
                                           NM_SETTINGS_CONNECTION_COMMIT_REASON_USER_ACTION | NM_SETTINGS_CONNECTION_COMMIT_REASON_ID_CHANGED,
                                           "add-and-activate",
                                           NULL);
            g_dbus_method_invocation_return_value (
                context,
                g_variant_new ("(oo)",
                               nm_dbus_object_get_path (NM_DBUS_OBJECT (new_connection)),
                               nm_dbus_object_get_path (NM_DBUS_OBJECT (active))));
            nm_audit_log_connection_op (NM_AUDIT_OP_CONN_ADD_ACTIVATE,
                                        nm_active_connection_get_settings_connection (active),
                                        TRUE,
                                        NULL,
                                        nm_active_connection_get_subject (active),
                                        NULL);
            return;
        }

        error = local;
    }

    nm_assert (error);

    nm_active_connection_set_state_fail (active,
                                         NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN,
                                         error->message);
    if (new_connection)
        nm_settings_connection_delete (new_connection, NULL);
    g_dbus_method_invocation_return_gerror (context, error);
    nm_audit_log_connection_op (NM_AUDIT_OP_CONN_ADD_ACTIVATE,
                                NULL,
                                FALSE,
                                NULL,
                                nm_active_connection_get_subject (active),
                                error->message);
}

static void
_add_and_activate_auth_done (NMManager *self,
                             NMActiveConnection *active,
                             NMConnection *connection,
                             GDBusMethodInvocation *invocation,
                             NMSettingsConnectionPersistMode persist,
                             gboolean success,
                             const char *error_desc)
{
    NMManagerPrivate *priv;
    GError *error = NULL;

    if (!success) {
        error = g_error_new_literal (NM_MANAGER_ERROR,
                                     NM_MANAGER_ERROR_PERMISSION_DENIED,
                                     error_desc);
        nm_audit_log_connection_op (NM_AUDIT_OP_CONN_ADD_ACTIVATE,
                                    NULL,
                                    FALSE,
                                    NULL,
                                    nm_active_connection_get_subject (active),
                                    error->message);
        g_dbus_method_invocation_take_error (invocation, error);
        return;
    }

    priv = NM_MANAGER_GET_PRIVATE (self);

    /* FIXME(shutdown): nm_settings_add_connection_dbus() cannot be cancelled. It should be made
     * cancellable and tracked via AsyncOpData to be able to do a clean
     * shutdown. */
    nm_settings_add_connection_dbus (priv->settings,
                                     connection,
                                     FALSE,
                                     nm_active_connection_get_subject (active),
                                     invocation,
                                     activation_add_done,
                                     nm_utils_user_data_pack (self,
                                                              g_object_ref (active),
                                                              GINT_TO_POINTER (persist)));
}
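The user_data for the asynchronous add above is bundled with nm_utils_user_data_pack() and taken apart again in activation_add_done(). A rough sketch of that general pattern, assuming a hand-rolled context struct rather than NetworkManager's actual helper:

#include <glib-object.h>

typedef struct {
    gpointer  owner;    /* borrowed back-pointer, e.g. the manager */
    GObject  *object;   /* holds a reference that is handed to the callback */
    int       mode;     /* e.g. a persist mode packed as an integer */
} AsyncCtx;

static gpointer
async_ctx_pack (gpointer owner, GObject *object, int mode)
{
    AsyncCtx *ctx = g_new0 (AsyncCtx, 1);

    ctx->owner  = owner;
    ctx->object = g_object_ref (object);
    ctx->mode   = mode;
    return ctx;
}

static void
async_ctx_unpack (gpointer user_data, gpointer *owner, GObject **object, int *mode)
{
    AsyncCtx *ctx = user_data;

    *owner  = ctx->owner;
    *object = ctx->object;   /* the reference transfers to the caller */
    *mode   = ctx->mode;
    g_free (ctx);
}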

static void
impl_manager_add_and_activate_connection (NMDBusObject *obj,
                                          const NMDBusInterfaceInfoExtended *interface_info,
                                          const NMDBusMethodInfoExtended *method_info,
                                          GDBusConnection *dbus_connection,
                                          const char *sender,
                                          GDBusMethodInvocation *invocation,
                                          GVariant *parameters)
{
    NMManager *self = NM_MANAGER (obj);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    gs_unref_object NMConnection *incompl_conn = NULL;
    gs_unref_object NMActiveConnection *active = NULL;
    gs_unref_object NMAuthSubject *subject = NULL;
    GError *error = NULL;
    NMDevice *device = NULL;
    gboolean is_vpn = FALSE;
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more then 1300 lines of hand written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue us more under our control, we can address issus and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straight forward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in and idle handle, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we emit now more PropertiesChanged signals then before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes under similar
conditions doesn't show a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
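On the GVariant caching mentioned above, a purely illustrative sketch follows;
this is an assumption about how it could look, not something the patch adds,
and all names are invented:

#include <glib.h>

/* Sketch: cache the GVariant last built for a D-Bus property and drop it
 * whenever the underlying property changes. */
typedef struct {
    GVariant *value;   /* NULL means "not cached" */
} DBusPropCache;

static GVariant *
dbus_prop_cache_lookup_or_build (DBusPropCache *cache,
                                 GVariant *(*build) (gpointer user_data),
                                 gpointer user_data)
{
    if (!cache->value)
        cache->value = g_variant_ref_sink (build (user_data));
    return cache->value;
}

static void
dbus_prop_cache_invalidate (DBusPropCache *cache)
{
    /* call this from the property's notify handler */
    g_clear_pointer (&cache->value, g_variant_unref);
}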
    gs_unref_variant GVariant *settings = NULL;
    gs_unref_variant GVariant *options = NULL;
    const char *device_path;
    const char *specific_object_path;
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation rather than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates keeping the actual profile.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection, we could in the future make
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
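To make the delegation concrete, a minimal sketch follows. The type and field
names are invented for illustration and do not match the real
NMSettingsConnection internals; it only assumes the libnm headers:

#include <NetworkManager.h>

/* Has-a instead of is-a: the wrapper owns an NMConnection and forwards to it. */
typedef struct {
    GObject parent;
    NMConnection *delegate;   /* e.g. an NMSimpleConnection instance */
} ExampleSettingsConnection;

static const char *
example_settings_connection_get_id (ExampleSettingsConnection *self)
{
    /* delegate instead of implementing the NMConnection interface ourselves */
    return nm_connection_get_id (self->delegate);
}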
    gs_free NMConnection **conns = NULL;
    NMSettingsConnectionPersistMode persist = NM_SETTINGS_CONNECTION_PERSIST_MODE_DISK;
    gboolean bind_dbus_client = FALSE;
    if (g_strcmp0 (method_info->parent.name, "AddAndActivateConnection2") == 0)
        g_variant_get (parameters, "(@a{sa{sv}}&o&o@a{sv})", &settings, &device_path, &specific_object_path, &options);
    else
        g_variant_get (parameters, "(@a{sa{sv}}&o&o)", &settings, &device_path, &specific_object_path);

    if (options) {
        GVariantIter iter;
        const char *option_name;
        GVariant *option_value;

        g_variant_iter_init (&iter, options);
        while (g_variant_iter_next (&iter, "{&sv}", &option_name, &option_value)) {
            gs_unref_variant GVariant *option_value_free = NULL;
            const char *s;

            option_value_free = option_value;

            if (   nm_streq (option_name, "persist")
                && g_variant_is_of_type (option_value, G_VARIANT_TYPE_STRING)) {
                s = g_variant_get_string (option_value, NULL);

                if (nm_streq (s, "volatile"))
                    persist = NM_SETTINGS_CONNECTION_PERSIST_MODE_VOLATILE_ONLY;
                else if (nm_streq (s, "memory"))
                    persist = NM_SETTINGS_CONNECTION_PERSIST_MODE_IN_MEMORY_ONLY;
                else if (nm_streq (s, "disk"))
                    persist = NM_SETTINGS_CONNECTION_PERSIST_MODE_DISK;
                else {
                    error = g_error_new_literal (NM_MANAGER_ERROR,
                                                 NM_MANAGER_ERROR_INVALID_ARGUMENTS,
                                                 "Option \"persist\" must be one of \"volatile\", \"memory\" or \"disk\"");
                    goto error;
                }
            } else if (   nm_streq (option_name, "bind-activation")
                       && g_variant_is_of_type (option_value, G_VARIANT_TYPE_STRING)) {
                s = g_variant_get_string (option_value, NULL);

                if (nm_streq (s, "dbus-client"))
                    bind_dbus_client = TRUE;
                else if (nm_streq (s, "none"))
                    bind_dbus_client = FALSE;
                else {
                    error = g_error_new_literal (NM_MANAGER_ERROR,
                                                 NM_MANAGER_ERROR_INVALID_ARGUMENTS,
                                                 "Option \"bind-activation\" must be one of \"dbus-client\" or \"none\"");
                    goto error;
                }
            } else {
                error = g_error_new_literal (NM_MANAGER_ERROR,
                                             NM_MANAGER_ERROR_INVALID_ARGUMENTS,
                                             "Unknown extra option passed");
                goto error;
            }
        }
    }
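/* For context, a separate, self-contained sketch of how a client could
 * exercise the two options above over D-Bus, using the same lower-level
 * GDBusConnection API. The device path is a placeholder (an assumption,
 * pick a real one from GetDevices), and "/" for the specific object means
 * "none". This is illustrative only, not part of nm-manager.c. */

#include <gio/gio.h>

int
main (void)
{
    GDBusConnection *bus;
    GVariantBuilder conn_b, opts_b;
    GVariant *ret;
    GError *error = NULL;

    bus = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, &error);
    if (!bus) {
        g_printerr ("cannot get system bus: %s\n", error->message);
        return 1;
    }

    /* empty a{sa{sv}}: let NetworkManager complete the connection */
    g_variant_builder_init (&conn_b, G_VARIANT_TYPE ("a{sa{sv}}"));

    g_variant_builder_init (&opts_b, G_VARIANT_TYPE ("a{sv}"));
    g_variant_builder_add (&opts_b, "{sv}", "persist", g_variant_new_string ("volatile"));
    g_variant_builder_add (&opts_b, "{sv}", "bind-activation", g_variant_new_string ("dbus-client"));

    ret = g_dbus_connection_call_sync (bus,
                                       "org.freedesktop.NetworkManager",
                                       "/org/freedesktop/NetworkManager",
                                       "org.freedesktop.NetworkManager",
                                       "AddAndActivateConnection2",
                                       g_variant_new ("(a{sa{sv}}ooa{sv})",
                                                      &conn_b,
                                                      "/org/freedesktop/NetworkManager/Devices/2",  /* placeholder device */
                                                      "/",
                                                      &opts_b),
                                       NULL,
                                       G_DBUS_CALL_FLAGS_NONE,
                                       -1,
                                       NULL,
                                       &error);
    if (!ret) {
        g_printerr ("AddAndActivateConnection2 failed: %s\n", error->message);
        return 1;
    }

    g_variant_unref (ret);
    g_object_unref (bus);
    return 0;
}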
    specific_object_path = nm_utils_dbus_normalize_object_path (specific_object_path);
    device_path = nm_utils_dbus_normalize_object_path (device_path);

    /* Try to create a new connection with the given settings.
     * We allow empty settings for AddAndActivateConnection(). In that case,
     * the connection will be completed in nm_utils_complete_generic() or
     * nm_device_complete_connection() below. Just make sure we don't expect
     * specific data being in the connection till then (especially in
     * validate_activation_request()).
     */
    incompl_conn = nm_simple_connection_new ();

    if (settings && g_variant_n_children (settings))
        _nm_connection_replace_settings (incompl_conn, settings, NM_SETTING_PARSE_FLAGS_STRICT, NULL);

    subject = validate_activation_request (self,
                                           invocation,
                                           NULL,
                                           incompl_conn,
                                           device_path,
                                           &device,
                                           &is_vpn,
                                           &error);
    if (!subject)
        goto error;

    if (is_vpn) {
        /* Try to fill the VPN's connection setting and name at least */
        if (!nm_connection_get_setting_vpn (incompl_conn)) {
            error = g_error_new_literal (NM_CONNECTION_ERROR,
                                         NM_CONNECTION_ERROR_MISSING_SETTING,
                                         "VPN connections require a 'vpn' setting");
            g_prefix_error (&error, "%s: ", NM_SETTING_VPN_SETTING_NAME);
            goto error;
        }

        conns = nm_settings_connections_array_to_connections (nm_settings_get_connections (priv->settings, NULL), -1);

        nm_utils_complete_generic (priv->platform,
                                   incompl_conn,
                                   NM_SETTING_VPN_SETTING_NAME,
                                   conns,
                                   NULL,
                                   _("VPN connection"),
                                   NULL,
                                   FALSE); /* No IPv6 by default for now */
    } else {
        conns = nm_settings_connections_array_to_connections (nm_settings_get_connections (priv->settings, NULL), -1);

        /* Let each device subclass complete the connection */
        if (!nm_device_complete_connection (device,
                                            incompl_conn,
                                            specific_object_path,
                                            conns,
                                            &error))
            goto error;
    }

    active = _new_active_connection (self,
                                     is_vpn,
                                     NULL,
                                     incompl_conn,
                                     NULL,
                                     specific_object_path,
                                     device,
                                     subject,
                                     NM_ACTIVATION_TYPE_MANAGED,
                                     NM_ACTIVATION_REASON_USER_REQUEST,
                                     &error);
    if (!active)
        goto error;

    if (bind_dbus_client)
        nm_active_connection_bind_dbus_client (active, dbus_connection, sender);

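    /* Note: binding to the D-Bus client ties the activation's lifetime to the
     * requesting client ("sender"): when that client drops off the bus, the
     * active connection is torn down again. */
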
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
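A minimal sketch of the delegation pattern the commit message above describes (illustrative only, not the actual NMSettingsConnection code; the "Example*" names are made up, while NMConnection, nm_simple_connection_new() and g_set_object() are real libnm/GLib API):

#include <NetworkManager.h>

typedef struct {
	NMConnection *connection;   /* NMSimpleConnection delegate that stores the actual profile */
	/* ... D-Bus export state, NMSettings bookkeeping, on-disk persistence, ... */
} ExampleSettingsConnection;

/* Callers ask for the profile explicitly instead of casting the
 * settings object itself to NMConnection. */
static NMConnection *
example_settings_connection_get_connection (ExampleSettingsConnection *self)
{
	return self->connection;
}

/* Replacing the profile only swaps the delegate; the settings object
 * (its D-Bus path, tracking state, ...) is left untouched. */
static void
example_settings_connection_set_connection (ExampleSettingsConnection *self,
                                            NMConnection *new_connection)
{
	g_set_object (&self->connection, new_connection);
}

static ExampleSettingsConnection *
example_settings_connection_new (void)
{
	ExampleSettingsConnection *self;

	self = g_new0 (ExampleSettingsConnection, 1);
	self->connection = nm_simple_connection_new ();
	return self;
}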
|
|
|
nm_active_connection_authorize (active,
|
|
|
|
|
incompl_conn,
|
2018-04-18 09:06:54 +02:00
|
|
|
_async_op_complete_ac_auth_cb,
|
|
|
|
|
_async_op_data_new_ac_auth_add_and_activate (self,
|
|
|
|
|
active,
|
|
|
|
|
invocation,
|
2018-10-25 16:54:37 +02:00
|
|
|
incompl_conn,
|
|
|
|
|
persist));
|
2018-04-12 12:01:25 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
/* we passed the pointers on to _async_op_data_new_ac_auth_add_and_activate() */
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
g_steal_pointer (&incompl_conn);
|
2018-04-12 12:01:25 +02:00
|
|
|
g_steal_pointer (&active);
|
2013-08-28 16:19:20 -05:00
|
|
|
return;
|
2013-07-29 12:42:16 -05:00
|
|
|
|
2012-09-13 13:17:46 -05:00
|
|
|
error:
|
2016-04-20 12:10:55 +02:00
|
|
|
nm_audit_log_connection_op (NM_AUDIT_OP_CONN_ADD_ACTIVATE, NULL, FALSE, NULL, subject, error->message);
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
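A minimal, self-contained sketch of the lower-level approach the commit message above describes: a static GDBusInterfaceInfo descriptor plus g_dbus_connection_register_object(), with no generated skeleton in between. The org.example names and the Ping method are made up; the GIO calls and structs are real API, but this is not NetworkManager's actual NMDBusManager code.

#include <gio/gio.h>

static GDBusMethodInfo example_method_ping = {
	.ref_count = -1,            /* -1 marks the descriptor as statically allocated */
	.name      = "Ping",
};

static GDBusMethodInfo *example_methods[] = {
	&example_method_ping,
	NULL,
};

static GDBusInterfaceInfo example_interface_info = {
	.ref_count = -1,
	.name      = "org.example.Demo",
	.methods   = example_methods,
};

/* one hand-written dispatcher instead of a generated skeleton */
static void
example_method_call (GDBusConnection       *connection,
                     const char            *sender,
                     const char            *object_path,
                     const char            *interface_name,
                     const char            *method_name,
                     GVariant              *parameters,
                     GDBusMethodInvocation *invocation,
                     gpointer               user_data)
{
	if (g_strcmp0 (method_name, "Ping") == 0) {
		g_dbus_method_invocation_return_value (invocation, NULL);
		return;
	}
	g_dbus_method_invocation_return_error (invocation,
	                                       G_DBUS_ERROR,
	                                       G_DBUS_ERROR_UNKNOWN_METHOD,
	                                       "Unknown method %s", method_name);
}

static const GDBusInterfaceVTable example_vtable = {
	.method_call = example_method_call,
};

/* glue the object directly to the GDBusConnection */
static guint
example_register (GDBusConnection *connection, GError **error)
{
	return g_dbus_connection_register_object (connection,
	                                          "/org/example/Demo",
	                                          &example_interface_info,
	                                          &example_vtable,
	                                          NULL, NULL, error);
}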
|
|
|
g_dbus_method_invocation_take_error (invocation, error);
|
2007-02-08 15:34:26 +00:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2012-09-13 16:51:58 -05:00
|
|
|
|
2008-03-26 13:43:01 +00:00
|
|
|
gboolean
|
|
|
|
|
nm_manager_deactivate_connection (NMManager *manager,
|
2017-01-26 14:20:01 +01:00
|
|
|
NMActiveConnection *active,
|
2008-10-11 19:57:45 +00:00
|
|
|
NMDeviceStateReason reason,
|
2008-03-26 13:43:01 +00:00
|
|
|
GError **error)
|
2007-10-03 14:48:25 +00:00
|
|
|
{
|
2008-03-26 13:43:01 +00:00
|
|
|
gboolean success = FALSE;
|
2007-10-03 14:48:25 +00:00
|
|
|
|
2012-09-14 15:21:29 -05:00
|
|
|
if (NM_IS_VPN_CONNECTION (active)) {
|
2017-03-11 14:39:07 +01:00
|
|
|
NMActiveConnectionStateReason vpn_reason = NM_ACTIVE_CONNECTION_STATE_REASON_USER_DISCONNECTED;
|
2007-10-03 14:48:25 +00:00
|
|
|
|
2017-02-23 15:19:03 +01:00
|
|
|
if (nm_device_state_reason_check (reason) == NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
|
2017-03-11 14:39:07 +01:00
|
|
|
vpn_reason = NM_ACTIVE_CONNECTION_STATE_REASON_CONNECTION_REMOVED;
|
2017-02-23 15:19:03 +01:00
|
|
|
|
2015-08-19 18:55:53 +02:00
|
|
|
if (nm_vpn_connection_deactivate (NM_VPN_CONNECTION (active), vpn_reason, FALSE))
|
2008-03-26 13:43:01 +00:00
|
|
|
success = TRUE;
|
2013-06-28 14:04:29 +02:00
|
|
|
else
|
|
|
|
|
g_set_error_literal (error, NM_MANAGER_ERROR, NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
|
|
|
|
|
"The VPN connection was not active.");
|
2008-03-26 13:43:01 +00:00
|
|
|
} else {
|
2012-09-14 15:21:29 -05:00
|
|
|
g_assert (NM_IS_ACT_REQUEST (active));
|
|
|
|
|
nm_device_state_changed (nm_active_connection_get_device (active),
|
2014-01-03 13:58:05 -05:00
|
|
|
NM_DEVICE_STATE_DEACTIVATING,
|
2012-09-14 15:21:29 -05:00
|
|
|
reason);
|
|
|
|
|
success = TRUE;
|
2008-03-26 13:43:01 +00:00
|
|
|
}
|
|
|
|
|
|
2012-09-14 15:21:29 -05:00
|
|
|
if (success)
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify (manager, PROP_ACTIVE_CONNECTIONS);
|
2012-09-14 15:21:29 -05:00
|
|
|
|
2008-03-26 13:43:01 +00:00
|
|
|
return success;
|
|
|
|
|
}
|
|
|
|
|
|
2010-06-04 00:42:10 -07:00
|
|
|
static void
|
|
|
|
|
deactivate_net_auth_done_cb (NMAuthChain *chain,
|
2011-05-18 22:20:24 -05:00
|
|
|
GError *auth_error,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2010-06-04 00:42:10 -07:00
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2011-05-18 22:20:24 -05:00
|
|
|
GError *error = NULL;
|
2010-06-04 00:42:10 -07:00
|
|
|
NMAuthCallResult result;
|
2015-07-14 10:26:54 +02:00
|
|
|
NMActiveConnection *active;
|
|
|
|
|
char *path;
|
2010-06-04 00:42:10 -07:00
|
|
|
|
2013-07-29 11:53:23 -05:00
|
|
|
g_assert (context);
|
|
|
|
|
|
2010-06-04 00:42:10 -07:00
|
|
|
priv->auth_chains = g_slist_remove (priv->auth_chains, chain);
|
|
|
|
|
|
2015-07-14 10:26:54 +02:00
|
|
|
path = nm_auth_chain_get_data (chain, "path");
|
2012-10-08 12:52:15 -05:00
|
|
|
result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_NETWORK_CONTROL);
|
2017-01-26 14:20:01 +01:00
|
|
|
active = active_connection_get_by_path (self, path);
|
2012-10-08 12:52:15 -05:00
|
|
|
|
2012-06-01 16:53:23 -05:00
|
|
|
if (auth_error) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "Disconnect request failed: %s", auth_error->message);
|
2012-06-01 16:53:23 -05:00
|
|
|
error = g_error_new (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Deactivate request failed: %s",
|
|
|
|
|
auth_error->message);
|
|
|
|
|
} else if (result != NM_AUTH_CALL_RESULT_YES) {
|
|
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Not authorized to deactivate connections");
|
2017-01-26 14:20:01 +01:00
|
|
|
} else if (!active) {
|
|
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
|
|
|
|
|
"The connection was not active.");
|
2012-06-01 16:53:23 -05:00
|
|
|
} else {
|
|
|
|
|
/* success; deactivation allowed */
|
2011-05-18 22:20:24 -05:00
|
|
|
if (!nm_manager_deactivate_connection (self,
|
2017-01-26 14:20:01 +01:00
|
|
|
active,
|
2011-05-18 22:20:24 -05:00
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED,
|
|
|
|
|
&error))
|
2016-02-28 16:25:36 +01:00
|
|
|
nm_assert (error);
|
2010-06-04 00:42:10 -07:00
|
|
|
}
|
|
|
|
|
|
2015-07-14 10:26:54 +02:00
|
|
|
if (active) {
|
|
|
|
|
nm_audit_log_connection_op (NM_AUDIT_OP_CONN_DEACTIVATE,
|
2015-07-14 16:53:24 +02:00
|
|
|
nm_active_connection_get_settings_connection (active),
|
2015-07-14 10:26:54 +02:00
|
|
|
!error,
|
2016-04-20 12:10:55 +02:00
|
|
|
NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
nm_auth_chain_get_subject (chain),
|
|
|
|
|
error ? error->message : NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2015-04-15 14:53:30 -04:00
|
|
|
if (error)
|
|
|
|
|
g_dbus_method_invocation_take_error (context, error);
|
|
|
|
|
else
|
|
|
|
|
g_dbus_method_invocation_return_value (context, NULL);
|
|
|
|
|
|
2018-04-09 11:52:55 +02:00
|
|
|
nm_auth_chain_destroy (chain);
|
2010-06-04 00:42:10 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
impl_manager_deactivate_connection (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *dbus_connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (obj);
|
2010-06-04 00:42:10 -07:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2015-08-12 18:29:10 +02:00
|
|
|
NMActiveConnection *ac;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn = NULL;
|
2010-06-04 00:42:10 -07:00
|
|
|
GError *error = NULL;
|
2014-01-17 11:18:23 -06:00
|
|
|
NMAuthSubject *subject = NULL;
|
2010-06-04 00:42:10 -07:00
|
|
|
NMAuthChain *chain;
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
const char *active_path;
|
|
|
|
|
|
|
|
|
|
g_variant_get (parameters, "(&o)", &active_path);
|
2010-06-04 00:42:10 -07:00
|
|
|
|
2012-08-22 17:11:31 -05:00
|
|
|
/* Find the connection by its object path */
|
2015-08-12 18:29:10 +02:00
|
|
|
ac = active_connection_get_by_path (self, active_path);
|
|
|
|
|
if (ac)
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_active_connection_get_settings_connection (ac);
|
2010-06-04 00:42:10 -07:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!sett_conn) {
|
2010-06-04 00:42:10 -07:00
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
|
|
|
|
|
"The connection was not active.");
|
2013-07-29 11:53:23 -05:00
|
|
|
goto done;
|
2010-06-04 00:42:10 -07:00
|
|
|
}
|
|
|
|
|
|
2014-01-17 11:18:23 -06:00
|
|
|
/* Validate the caller */
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
subject = nm_auth_subject_new_unix_process_from_context (invocation);
|
2014-01-17 11:18:23 -06:00
|
|
|
if (!subject) {
|
|
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Failed to get request UID.");
|
|
|
|
|
goto done;
|
|
|
|
|
}
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_auth_is_subject_in_acl_set_error (nm_settings_connection_get_connection (sett_conn),
|
2018-04-12 09:48:16 +02:00
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
&error))
|
2014-01-17 11:18:23 -06:00
|
|
|
goto done;
|
|
|
|
|
|
2012-12-16 12:30:41 -06:00
|
|
|
/* Validate the user request */
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_subject (subject, invocation, deactivate_net_auth_done_cb, self);
|
2013-07-29 11:53:23 -05:00
|
|
|
if (!chain) {
|
2010-06-04 00:42:10 -07:00
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2013-07-29 11:53:23 -05:00
|
|
|
"Unable to authenticate request.");
|
|
|
|
|
goto done;
|
2010-06-04 00:42:10 -07:00
|
|
|
}
|
2013-07-29 11:53:23 -05:00
|
|
|
|
|
|
|
|
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
|
|
|
|
nm_auth_chain_set_data (chain, "path", g_strdup (active_path), g_free);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_NETWORK_CONTROL, TRUE);
|
|
|
|
|
|
|
|
|
|
done:
|
2015-07-14 10:26:54 +02:00
|
|
|
if (error) {
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (sett_conn) {
|
|
|
|
|
nm_audit_log_connection_op (NM_AUDIT_OP_CONN_DEACTIVATE,
|
|
|
|
|
sett_conn, FALSE, NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
subject, error->message);
|
|
|
|
|
}
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_take_error (invocation, error);
|
2015-07-14 10:26:54 +02:00
|
|
|
}
|
|
|
|
|
g_clear_object (&subject);
|
2007-10-03 14:48:25 +00:00
|
|
|
}
|
|
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
static gboolean
sleep_devices_add (NMManager *self, NMDevice *device, gboolean suspending)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMSleepMonitorInhibitorHandle *handle = NULL;

    if (g_hash_table_lookup_extended (priv->sleep_devices, device, NULL, (gpointer *) &handle)) {
        if (suspending) {
            /* if we are suspending, always insert a new handle in sleep_devices.
             * Even if we had an old handle, it might be stale by now. */
            g_hash_table_insert (priv->sleep_devices, device,
                                 nm_sleep_monitor_inhibit_take (priv->sleep_monitor));
            if (handle)
                nm_sleep_monitor_inhibit_release (priv->sleep_monitor, handle);
        }
        return FALSE;
    }

    g_hash_table_insert (priv->sleep_devices,
                         g_object_ref (device),
                         suspending
                             ? nm_sleep_monitor_inhibit_take (priv->sleep_monitor)
                             : NULL);
    g_signal_connect (device, "notify::" NM_DEVICE_STATE, (GCallback) device_sleep_cb, self);
    return TRUE;
}

static gboolean
sleep_devices_remove (NMManager *self, NMDevice *device)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMSleepMonitorInhibitorHandle *handle;

    if (!g_hash_table_lookup_extended (priv->sleep_devices, device, NULL, (gpointer *) &handle))
        return FALSE;

    if (handle)
        nm_sleep_monitor_inhibit_release (priv->sleep_monitor, handle);

    /* Remove device from hash */
    g_signal_handlers_disconnect_by_func (device, device_sleep_cb, self);
    g_hash_table_remove (priv->sleep_devices, device);
    g_object_unref (device);
    return TRUE;
}

static void
sleep_devices_clear (NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMDevice *device;
    NMSleepMonitorInhibitorHandle *handle;
    GHashTableIter iter;

    if (!priv->sleep_devices)
        return;

    g_hash_table_iter_init (&iter, priv->sleep_devices);
    while (g_hash_table_iter_next (&iter, (gpointer *) &device, (gpointer *) &handle)) {
        g_signal_handlers_disconnect_by_func (device, device_sleep_cb, self);
        if (handle)
            nm_sleep_monitor_inhibit_release (priv->sleep_monitor, handle);
        g_object_unref (device);
        g_hash_table_iter_remove (&iter);
    }
}

static void
device_sleep_cb (NMDevice *device,
                 GParamSpec *pspec,
                 NMManager *self)
{
    switch (nm_device_get_state (device)) {
    case NM_DEVICE_STATE_DISCONNECTED:
        _LOGD (LOGD_SUSPEND, "sleep: unmanaging device %s", nm_device_get_ip_iface (device));
        nm_device_set_unmanaged_by_flags_queue (device,
                                                NM_UNMANAGED_SLEEPING,
                                                TRUE,
                                                NM_DEVICE_STATE_REASON_SLEEPING);
        break;
    case NM_DEVICE_STATE_UNMANAGED:
        _LOGD (LOGD_SUSPEND, "sleep: device %s is ready", nm_device_get_ip_iface (device));

        if (!sleep_devices_remove (self, device))
            g_return_if_reached ();

        break;
    default:
        return;
    }
}

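/* do_sleep_wake() is the single worker for both the sleep/wake path
 * (_internal_sleep(), sleeping_changed == TRUE) and the networking
 * enable/disable path (_internal_enable(), sleeping_changed == FALSE).
 * When the manager is sleeping or networking is disabled it unmanages
 * hardware devices, or waits for their disconnection via sleep_devices_add();
 * otherwise it refreshes rfkill state, renews dynamic IP setup on software
 * devices and re-manages hardware devices. */
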
static void
do_sleep_wake (NMManager *self, gboolean sleeping_changed)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    gboolean suspending, waking_from_suspend;
    NMDevice *device;

    suspending = sleeping_changed && priv->sleeping;
    waking_from_suspend = sleeping_changed && !priv->sleeping;

    if (manager_sleeping (self)) {
        _LOGD (LOGD_SUSPEND, "sleep: %s...", suspending ? "sleeping" : "disabling");

        /* FIXME: are there still hardware devices that need to be disabled around
         * suspend/resume?
         */
        c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
            /* FIXME: shouldn't we be unmanaging software devices if !suspending? */
            if (nm_device_is_software (device))
                continue;
            /* Wake-on-LAN devices will be taken down post-suspend rather than pre- */
            if (   suspending
                && device_is_wake_on_lan (priv->platform, device)) {
                _LOGD (LOGD_SUSPEND, "sleep: device %s has wake-on-lan, skipping",
                       nm_device_get_ip_iface (device));
                continue;
            }

            if (nm_device_is_activating (device) ||
                nm_device_get_state (device) == NM_DEVICE_STATE_ACTIVATED) {
                _LOGD (LOGD_SUSPEND, "sleep: wait disconnection of device %s",
                       nm_device_get_ip_iface (device));

                if (sleep_devices_add (self, device, suspending))
                    nm_device_queue_state (device, NM_DEVICE_STATE_DEACTIVATING, NM_DEVICE_STATE_REASON_SLEEPING);
            } else {
                nm_device_set_unmanaged_by_flags (device, NM_UNMANAGED_SLEEPING, TRUE, NM_DEVICE_STATE_REASON_SLEEPING);
            }
        }
    } else {
        _LOGD (LOGD_SUSPEND, "sleep: %s...", waking_from_suspend ? "waking up" : "re-enabling");

        if (waking_from_suspend) {
            sleep_devices_clear (self);
            c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
                if (nm_device_is_software (device))
                    continue;

                /* Belatedly take down Wake-on-LAN devices; ideally we wouldn't have to do this
                 * but for now it's the only way to make sure we re-check their connectivity.
                 */
                if (device_is_wake_on_lan (priv->platform, device))
                    nm_device_set_unmanaged_by_flags (device, NM_UNMANAGED_SLEEPING, TRUE, NM_DEVICE_STATE_REASON_SLEEPING);

                /* Check if the device is unmanaged but the state transition is still pending.
                 * If so, change state now so that later we re-manage the device forcing a
                 * re-check of available connections.
                 */
                if (   !nm_device_get_managed (device, FALSE)
                    && nm_device_get_state (device) != NM_DEVICE_STATE_UNMANAGED) {
                    nm_device_state_changed (device, NM_DEVICE_STATE_UNMANAGED, NM_DEVICE_STATE_REASON_SLEEPING);
                }
            }
        }

        /* Ensure rfkill state is up-to-date since we don't respond to state
         * changes during sleep.
         */
        nm_manager_rfkill_update (self, RFKILL_TYPE_UNKNOWN);

        /* Re-manage managed devices */
        c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
            guint i;

            if (nm_device_is_software (device)) {
                /* We do not manage/unmanage software devices but
                 * their DHCP leases could have gone stale so we need
                 * to renew them */
                nm_device_update_dynamic_ip_setup (device);
                continue;
            }

            /* enable/disable wireless devices since we don't respond
             * to killswitch changes during sleep.
             */
            for (i = 0; i < RFKILL_TYPE_MAX; i++) {
                RadioState *rstate = &priv->radio_states[i];
                gboolean enabled = radio_enabled_for_rstate (rstate, TRUE);

                if (rstate->desc) {
                    _LOGD (LOGD_RFKILL, "rfkill: %s %s devices (hw_enabled %d, sw_enabled %d, user_enabled %d)",
                           enabled ? "enabling" : "disabling",
                           rstate->desc, rstate->hw_enabled, rstate->sw_enabled, rstate->user_enabled);
                }
                if (nm_device_get_rfkill_type (device) == rstate->rtype)
                    nm_device_set_enabled (device, enabled);
            }

            nm_device_set_unmanaged_by_flags (device, NM_UNMANAGED_SLEEPING, FALSE, NM_DEVICE_STATE_REASON_NOW_MANAGED);
        }
    }

    nm_manager_update_state (self);
}

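/* _internal_sleep() and _internal_enable() (below) are the two entry points
 * into do_sleep_wake() in this file: the former flips priv->sleeping in
 * response to suspend/resume, the latter flips priv->net_enabled in response
 * to the Enable() D-Bus request. */
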
static void
_internal_sleep (NMManager *self, gboolean do_sleep)
{
    NMManagerPrivate *priv;

    g_return_if_fail (NM_IS_MANAGER (self));

    priv = NM_MANAGER_GET_PRIVATE (self);

    if (priv->sleeping == do_sleep)
        return;

    _LOGI (LOGD_SUSPEND, "sleep: %s requested (sleeping: %s enabled: %s)",
           do_sleep ? "sleep" : "wake",
           priv->sleeping ? "yes" : "no",
           priv->net_enabled ? "yes" : "no");

    priv->sleeping = do_sleep;

    do_sleep_wake (self, TRUE);

    _notify (self, PROP_SLEEPING);
}

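/* The polkit-authenticated variant of the Sleep() handler below is compiled
 * out; see the comment in impl_manager_sleep() for why the authorization
 * step was dropped in favor of plain D-Bus policy. */
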
#if 0
static void
sleep_auth_done_cb (NMAuthChain *chain,
                    GError *error,
                    GDBusMethodInvocation *context,
                    gpointer user_data)
{
    NMManager *self = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    GError *ret_error;
    NMAuthCallResult result;
    gboolean do_sleep;

    priv->auth_chains = g_slist_remove (priv->auth_chains, chain);

    result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_SLEEP_WAKE);
    if (error) {
        _LOGD (LOGD_SUSPEND, "Sleep/wake request failed: %s", error->message);
        ret_error = g_error_new (NM_MANAGER_ERROR,
                                 NM_MANAGER_ERROR_PERMISSION_DENIED,
                                 "Sleep/wake request failed: %s",
                                 error->message);
        g_dbus_method_invocation_take_error (context, ret_error);
    } else if (result != NM_AUTH_CALL_RESULT_YES) {
        ret_error = g_error_new_literal (NM_MANAGER_ERROR,
                                         NM_MANAGER_ERROR_PERMISSION_DENIED,
                                         "Not authorized to sleep/wake");
        g_dbus_method_invocation_take_error (context, ret_error);
    } else {
        /* Auth success */
        do_sleep = GPOINTER_TO_UINT (nm_auth_chain_get_data (chain, "sleep"));
        _internal_sleep (self, do_sleep);
        g_dbus_method_invocation_return_value (context, NULL);
    }

    nm_auth_chain_destroy (chain);
}
#endif

static void
impl_manager_sleep (NMDBusObject *obj,
                    const NMDBusInterfaceInfoExtended *interface_info,
                    const NMDBusMethodInfoExtended *method_info,
                    GDBusConnection *connection,
                    const char *sender,
                    GDBusMethodInvocation *invocation,
                    GVariant *parameters)
{
    NMManager *self = NM_MANAGER (obj);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    GError *error = NULL;
    gs_unref_object NMAuthSubject *subject = NULL;
    gboolean do_sleep;

    g_variant_get (parameters, "(b)", &do_sleep);

    subject = nm_auth_subject_new_unix_process_from_context (invocation);

    if (priv->sleeping == do_sleep) {
        error = g_error_new (NM_MANAGER_ERROR,
                             NM_MANAGER_ERROR_ALREADY_ASLEEP_OR_AWAKE,
                             "Already %s", do_sleep ? "asleep" : "awake");
        nm_audit_log_control_op (NM_AUDIT_OP_SLEEP_CONTROL, do_sleep ? "on" : "off", FALSE, subject,
                                 error->message);
        g_dbus_method_invocation_take_error (invocation, error);
        return;
    }

    /* Unconditionally allow the request. Previously it was polkit protected
     * but unfortunately that doesn't work for short-lived processes like
     * pm-utils. It uses dbus-send without --print-reply, which quits
     * immediately after sending the request, and NM is unable to obtain the
     * sender's UID as dbus-send has already dropped off the bus. Thus NM
     * fails the request. Instead, don't validate the request, but rely on
     * D-Bus permissions to restrict the call to root.
     */
    _internal_sleep (self, do_sleep);
    nm_audit_log_control_op (NM_AUDIT_OP_SLEEP_CONTROL, do_sleep ? "on" : "off", TRUE, subject, NULL);
    g_dbus_method_invocation_return_value (invocation, NULL);
    return;
}

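/* The "rely on D-Bus permissions" note in impl_manager_sleep() above refers
 * to the D-Bus system policy shipped with NetworkManager
 * (org.freedesktop.NetworkManager.conf).  A rough sketch of the kind of rules
 * involved -- the exact stock policy may differ -- restricting Sleep() to
 * root while denying it to everyone else:
 *
 *   <busconfig>
 *     <policy user="root">
 *       <allow send_destination="org.freedesktop.NetworkManager"
 *              send_interface="org.freedesktop.NetworkManager"
 *              send_member="Sleep"/>
 *     </policy>
 *     <policy context="default">
 *       <deny send_destination="org.freedesktop.NetworkManager"
 *             send_interface="org.freedesktop.NetworkManager"
 *             send_member="Sleep"/>
 *     </policy>
 *   </busconfig>
 */
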
static void
sleeping_cb (NMSleepMonitor *monitor, gboolean is_about_to_suspend, gpointer user_data)
{
    NMManager *self = user_data;

    _LOGT (LOGD_SUSPEND, "sleep: received %s signal", is_about_to_suspend ? "sleeping" : "resuming");
    _internal_sleep (self, is_about_to_suspend);
}

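/* sleeping_cb() is driven by NMSleepMonitor.  On systemd-based systems the
 * monitor follows logind's PrepareForSleep signal (TRUE right before suspend,
 * FALSE after resume).  One way to observe the underlying signal from a
 * shell, for illustration only:
 *
 *   dbus-monitor --system \
 *     "type='signal',interface='org.freedesktop.login1.Manager',member='PrepareForSleep'"
 */
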
static void
_internal_enable (NMManager *self, gboolean enable)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);

    nm_config_state_set (priv->config, TRUE, FALSE,
                         NM_CONFIG_STATE_PROPERTY_NETWORKING_ENABLED, enable);

    _LOGI (LOGD_SUSPEND, "%s requested (sleeping: %s enabled: %s)",
           enable ? "enable" : "disable",
           priv->sleeping ? "yes" : "no",
           priv->net_enabled ? "yes" : "no");

    priv->net_enabled = enable;

    do_sleep_wake (self, FALSE);

    _notify (self, PROP_NETWORKING_ENABLED);
}

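/* enable_net_done_cb() finishes the Enable() D-Bus request: it runs once the
 * NMAuthChain check for NM_AUTH_PERMISSION_ENABLE_DISABLE_NETWORK completes,
 * and either applies the change via _internal_enable() or returns a
 * permission error to the caller.  In practice this path is typically
 * exercised by clients such as `nmcli networking on|off` (an illustration,
 * not the only possible caller). */
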
static void
enable_net_done_cb (NMAuthChain *chain,
                    GError *error,
                    GDBusMethodInvocation *context,
                    gpointer user_data)
{
    NMManager *self = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    GError *ret_error = NULL;
    NMAuthCallResult result;
    gboolean enable;
    NMAuthSubject *subject;

    g_assert (context);

    priv->auth_chains = g_slist_remove (priv->auth_chains, chain);
    enable = GPOINTER_TO_UINT (nm_auth_chain_get_data (chain, "enable"));
    subject = nm_auth_chain_get_subject (chain);

    result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_NETWORK);
    if (error) {
        _LOGD (LOGD_CORE, "Enable request failed: %s", error->message);
        ret_error = g_error_new (NM_MANAGER_ERROR,
                                 NM_MANAGER_ERROR_PERMISSION_DENIED,
                                 "Enable request failed: %s",
                                 error->message);
    } else if (result != NM_AUTH_CALL_RESULT_YES) {
        ret_error = g_error_new_literal (NM_MANAGER_ERROR,
                                         NM_MANAGER_ERROR_PERMISSION_DENIED,
                                         "Not authorized to enable/disable networking");
    } else {
        /* Auth success */
        _internal_enable (self, enable);
        g_dbus_method_invocation_return_value (context, NULL);
        nm_audit_log_control_op (NM_AUDIT_OP_NET_CONTROL, enable ? "on" : "off", TRUE,
                                 subject, NULL);
    }

    if (ret_error) {
        nm_audit_log_control_op (NM_AUDIT_OP_NET_CONTROL, enable ? "on" : "off", FALSE,
                                 subject, ret_error->message);
        g_dbus_method_invocation_take_error (context, ret_error);
    }

    nm_auth_chain_destroy (chain);
}

static void
|
2018-02-26 13:51:52 +01:00
|
|
|
impl_manager_enable (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2010-05-29 23:00:46 -07:00
|
|
|
NMAuthChain *chain;
|
2010-05-30 08:30:37 -07:00
|
|
|
GError *error = NULL;
|
2018-02-26 13:51:52 +01:00
|
|
|
gboolean enable;
|
2010-05-29 23:00:46 -07:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get (parameters, "(b)", &enable);
|
2010-05-29 23:00:46 -07:00
|
|
|
|
|
|
|
|
if (priv->net_enabled == enable) {
|
|
|
|
|
error = g_error_new (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_ALREADY_ENABLED_OR_DISABLED,
|
|
|
|
|
"Already %s", enable ? "enabled" : "disabled");
|
2013-07-29 11:53:23 -05:00
|
|
|
goto done;
|
2010-05-29 23:00:46 -07:00
|
|
|
}
|
|
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context (invocation, enable_net_done_cb, self);
|
2013-07-29 11:53:23 -05:00
|
|
|
if (!chain) {
|
2010-05-30 08:30:37 -07:00
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2013-07-29 11:53:23 -05:00
|
|
|
"Unable to authenticate request.");
|
|
|
|
|
goto done;
|
2010-05-30 08:30:37 -07:00
|
|
|
}
|
2013-07-29 11:53:23 -05:00
|
|
|
|
|
|
|
|
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
|
|
|
|
nm_auth_chain_set_data (chain, "enable", GUINT_TO_POINTER (enable), NULL);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_NETWORK, TRUE);
|
|
|
|
|
|
|
|
|
|
done:
|
|
|
|
|
if (error)
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_take_error (invocation, error);
|
2010-05-22 08:55:30 -07:00
|
|
|
}
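For reference, a minimal client-side sketch of invoking this Enable(b) method over GDBus. The well-known name, object path and interface are the standard org.freedesktop.NetworkManager ones and are assumed here rather than taken from this file:

#include <gio/gio.h>

/* Hedged sketch: ask NetworkManager to enable or disable networking. */
static gboolean
call_enable (gboolean enable, GError **error)
{
	GDBusConnection *bus;
	GVariant *ret;

	bus = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, error);
	if (!bus)
		return FALSE;
	ret = g_dbus_connection_call_sync (bus,
	                                   "org.freedesktop.NetworkManager",
	                                   "/org/freedesktop/NetworkManager",
	                                   "org.freedesktop.NetworkManager",
	                                   "Enable",
	                                   g_variant_new ("(b)", enable),
	                                   NULL,    /* Enable returns an empty reply */
	                                   G_DBUS_CALL_FLAGS_NONE,
	                                   -1, NULL, error);
	g_object_unref (bus);
	if (!ret)
		return FALSE;
	g_variant_unref (ret);
	return TRUE;
}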
|
|
|
|
|
|
2010-05-28 18:23:00 -07:00
|
|
|
/* Permissions */
|
|
|
|
|
|
2010-06-02 02:16:14 -07:00
|
|
|
static void
|
2016-04-27 18:26:39 +02:00
|
|
|
get_perm_add_result (NMManager *self, NMAuthChain *chain, GVariantBuilder *results, const char *permission)
|
2010-06-02 02:16:14 -07:00
|
|
|
{
|
|
|
|
|
NMAuthCallResult result;
|
|
|
|
|
|
2012-10-08 12:52:15 -05:00
|
|
|
result = nm_auth_chain_get_result (chain, permission);
|
2010-06-02 02:16:14 -07:00
|
|
|
if (result == NM_AUTH_CALL_RESULT_YES)
|
2015-04-15 14:53:30 -04:00
|
|
|
g_variant_builder_add (results, "{ss}", permission, "yes");
|
2010-06-02 02:16:14 -07:00
|
|
|
else if (result == NM_AUTH_CALL_RESULT_NO)
|
2015-04-15 14:53:30 -04:00
|
|
|
g_variant_builder_add (results, "{ss}", permission, "no");
|
2010-06-02 02:16:14 -07:00
|
|
|
else if (result == NM_AUTH_CALL_RESULT_AUTH)
|
2015-04-15 14:53:30 -04:00
|
|
|
g_variant_builder_add (results, "{ss}", permission, "auth");
|
2010-06-02 02:16:14 -07:00
|
|
|
else {
|
2016-04-27 18:26:39 +02:00
|
|
|
_LOGD (LOGD_CORE, "unknown auth chain result %d", result);
|
2010-06-02 02:16:14 -07:00
|
|
|
}
|
|
|
|
|
}
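get_perm_add_result() appends one {ss} entry per permission to the caller's builder; get_permissions_done_cb() below then wraps the dictionary with g_variant_new ("(a{ss})", &results), which ends and consumes the builder in one step. A tiny standalone sketch of that GVariantBuilder pattern (the permission string is a placeholder):

GVariantBuilder b;
GVariant *reply;

g_variant_builder_init (&b, G_VARIANT_TYPE ("a{ss}"));
g_variant_builder_add (&b, "{ss}", "example.permission", "auth");
/* The "a{ss}" conversion takes the builder, ends it and steals its
 * contents; no explicit g_variant_builder_end() call is needed. */
reply = g_variant_new ("(a{ss})", &b);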
|
|
|
|
|
|
2010-05-28 18:23:00 -07:00
|
|
|
static void
|
|
|
|
|
get_permissions_done_cb (NMAuthChain *chain,
|
|
|
|
|
GError *error,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2010-05-29 23:00:46 -07:00
|
|
|
gpointer user_data)
|
2010-05-28 18:23:00 -07:00
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
GError *ret_error;
|
2015-04-15 14:53:30 -04:00
|
|
|
GVariantBuilder results;
|
2010-05-28 18:23:00 -07:00
|
|
|
|
2013-07-29 11:53:23 -05:00
|
|
|
g_assert (context);
|
|
|
|
|
|
2010-05-28 18:23:00 -07:00
|
|
|
priv->auth_chains = g_slist_remove (priv->auth_chains, chain);
|
|
|
|
|
if (error) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "Permissions request failed: %s", error->message);
|
2010-05-28 18:23:00 -07:00
|
|
|
ret_error = g_error_new (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Permissions request failed: %s",
|
|
|
|
|
error->message);
|
2015-04-15 14:53:30 -04:00
|
|
|
g_dbus_method_invocation_take_error (context, ret_error);
|
2010-05-28 18:23:00 -07:00
|
|
|
} else {
|
2015-04-15 14:53:30 -04:00
|
|
|
g_variant_builder_init (&results, G_VARIANT_TYPE ("a{ss}"));
|
2012-10-08 12:52:15 -05:00
|
|
|
|
2016-04-27 18:26:39 +02:00
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_ENABLE_DISABLE_NETWORK);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_SLEEP_WAKE);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_ENABLE_DISABLE_WIFI);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_ENABLE_DISABLE_WWAN);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_ENABLE_DISABLE_WIMAX);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_NETWORK_CONTROL);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_WIFI_SHARE_PROTECTED);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_WIFI_SHARE_OPEN);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_SETTINGS_MODIFY_SYSTEM);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_SETTINGS_MODIFY_OWN);
|
|
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_SETTINGS_MODIFY_HOSTNAME);
|
2016-06-01 22:04:00 +02:00
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_SETTINGS_MODIFY_GLOBAL_DNS);
|
2016-05-30 15:42:44 +02:00
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_RELOAD);
|
2016-08-17 15:34:55 +02:00
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK);
|
2016-08-10 11:54:32 +02:00
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_ENABLE_DISABLE_STATISTICS);
|
2017-08-09 15:20:04 +08:00
|
|
|
get_perm_add_result (self, chain, &results, NM_AUTH_PERMISSION_ENABLE_DISABLE_CONNECTIVITY_CHECK);
|
2012-10-08 12:52:15 -05:00
|
|
|
|
2015-04-15 14:53:30 -04:00
|
|
|
g_dbus_method_invocation_return_value (context,
|
|
|
|
|
g_variant_new ("(a{ss})", &results));
|
2010-05-28 18:23:00 -07:00
|
|
|
}
|
|
|
|
|
|
2018-04-09 11:52:55 +02:00
|
|
|
nm_auth_chain_destroy (chain);
|
2010-05-28 18:23:00 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2018-02-26 13:51:52 +01:00
|
|
|
impl_manager_get_permissions (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (obj);
|
2010-05-28 18:23:00 -07:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
NMAuthChain *chain;
|
|
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context (invocation, get_permissions_done_cb, self);
|
2013-07-29 11:53:23 -05:00
|
|
|
if (!chain) {
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal (invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Unable to authenticate request.");
|
2013-07-29 11:53:23 -05:00
|
|
|
return;
|
2012-10-08 12:52:15 -05:00
|
|
|
}
|
2013-07-29 11:53:23 -05:00
|
|
|
|
|
|
|
|
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_NETWORK, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_SLEEP_WAKE, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_WIFI, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_WWAN, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_WIMAX, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_NETWORK_CONTROL, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_WIFI_SHARE_PROTECTED, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_WIFI_SHARE_OPEN, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_SETTINGS_MODIFY_SYSTEM, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_SETTINGS_MODIFY_OWN, FALSE);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_SETTINGS_MODIFY_HOSTNAME, FALSE);
|
2016-06-01 22:04:00 +02:00
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_SETTINGS_MODIFY_GLOBAL_DNS, FALSE);
|
2016-05-30 15:42:44 +02:00
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_RELOAD, FALSE);
|
2016-08-17 15:34:55 +02:00
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, FALSE);
|
2016-08-10 11:54:32 +02:00
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_STATISTICS, FALSE);
|
2017-08-09 15:20:04 +08:00
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_CONNECTIVITY_CHECK, FALSE);
|
2010-05-28 18:23:00 -07:00
|
|
|
}
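A matching client-side sketch: call GetPermissions and walk the a{ss} dictionary assembled above. It assumes the same #include <gio/gio.h> and the standard NetworkManager bus name and object path as the Enable() sketch earlier:

/* Hedged sketch: query the caller's permissions and print them. */
static void
print_permissions (GDBusConnection *bus)
{
	GError *error = NULL;
	GVariant *ret, *dict;
	GVariantIter iter;
	const char *perm, *result;

	ret = g_dbus_connection_call_sync (bus,
	                                   "org.freedesktop.NetworkManager",
	                                   "/org/freedesktop/NetworkManager",
	                                   "org.freedesktop.NetworkManager",
	                                   "GetPermissions",
	                                   NULL,
	                                   G_VARIANT_TYPE ("(a{ss})"),
	                                   G_DBUS_CALL_FLAGS_NONE,
	                                   -1, NULL, &error);
	if (!ret) {
		g_printerr ("GetPermissions failed: %s\n", error->message);
		g_clear_error (&error);
		return;
	}
	g_variant_get (ret, "(@a{ss})", &dict);
	g_variant_iter_init (&iter, dict);
	while (g_variant_iter_next (&iter, "{&s&s}", &perm, &result))
		g_print ("%s = %s\n", perm, result);   /* "yes", "no" or "auth" */
	g_variant_unref (dict);
	g_variant_unref (ret);
}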
|
|
|
|
|
|
2015-04-15 14:53:30 -04:00
|
|
|
static void
|
2018-02-26 13:51:52 +01:00
|
|
|
impl_manager_state (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
2011-03-08 12:57:35 -06:00
|
|
|
{
|
2018-02-26 13:51:52 +01:00
|
|
|
    NMManager *self = NM_MANAGER (obj);

    nm_manager_update_state (self);
    g_dbus_method_invocation_return_value (invocation,
                                           g_variant_new ("(u)", NM_MANAGER_GET_PRIVATE (self)->state));
}
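For context on handlers such as impl_manager_state() above: with the rework, dispatch ultimately rests on plain GDBusConnection object registration. The following is only an illustrative sketch of that underlying GLib pattern (all "example_" names are made up); it is not the actual NMDBusManager dispatch code:

/* Illustrative glue only: a method_call callback plus registration of an
 * object path on a GDBusConnection. */
static void
example_method_call (GDBusConnection *connection,
                     const char *sender,
                     const char *object_path,
                     const char *interface_name,
                     const char *method_name,
                     GVariant *parameters,
                     GDBusMethodInvocation *invocation,
                     gpointer user_data)
{
    /* A real implementation would look up the exported object and its method
     * descriptor here and forward to a handler such as impl_manager_state(). */
}

static const GDBusInterfaceVTable example_vtable = {
    .method_call = example_method_call,
};

/* Registration (error handling omitted):
 *   g_dbus_connection_register_object (connection,
 *                                      "/org/freedesktop/NetworkManager",
 *                                      interface_info,
 *                                      &example_vtable,
 *                                      user_data, NULL, &error);
 */
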
static void
impl_manager_set_logging (NMDBusObject *obj,
                          const NMDBusInterfaceInfoExtended *interface_info,
                          const NMDBusMethodInfoExtended *method_info,
                          GDBusConnection *connection,
                          const char *sender,
                          GDBusMethodInvocation *invocation,
                          GVariant *parameters)
{
    NMManager *self = NM_MANAGER (obj);
    GError *error = NULL;
    const char *level;
    const char *domains;

    /* The permission is already enforced by the D-Bus daemon, but we ensure
     * that the caller is still alive so that clients are forced to wait and
     * we'll be able to switch to polkit without breaking behavior.
     */
    if (!nm_dbus_manager_ensure_uid (nm_dbus_object_get_manager (NM_DBUS_OBJECT (self)),
                                     invocation,
                                     G_MAXULONG,
                                     NM_MANAGER_ERROR,
                                     NM_MANAGER_ERROR_PERMISSION_DENIED))
        return;
    g_variant_get (parameters, "(&s&s)", &level, &domains);

    if (nm_logging_setup (level, domains, NULL, &error)) {
        _LOGI (LOGD_CORE, "logging: level '%s' domains '%s'",
               nm_logging_level_to_string (), nm_logging_domains_to_string ());
    }

    if (error)
        g_dbus_method_invocation_take_error (invocation, error);
    else
        g_dbus_method_invocation_return_value (invocation, NULL);
}

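The "(&s&s)" parameters handled above correspond to the SetLogging(level, domains) method on the org.freedesktop.NetworkManager interface. A minimal client-side sketch using plain GDBusConnection (illustrative only; error handling reduced to the essentials):

/* Illustrative client call; not part of nm-manager.c. */
static gboolean
example_call_set_logging (GDBusConnection *bus,
                          const char *level,
                          const char *domains,
                          GError **error)
{
    GVariant *ret;

    ret = g_dbus_connection_call_sync (bus,
                                       "org.freedesktop.NetworkManager",
                                       "/org/freedesktop/NetworkManager",
                                       "org.freedesktop.NetworkManager",
                                       "SetLogging",
                                       g_variant_new ("(ss)", level, domains),
                                       NULL,
                                       G_DBUS_CALL_FLAGS_NONE,
                                       -1, NULL, error);
    if (!ret)
        return FALSE;
    g_variant_unref (ret);
    return TRUE;
}
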
static void
impl_manager_get_logging (NMDBusObject *obj,
                          const NMDBusInterfaceInfoExtended *interface_info,
                          const NMDBusMethodInfoExtended *method_info,
                          GDBusConnection *connection,
                          const char *sender,
                          GDBusMethodInvocation *invocation,
                          GVariant *parameters)
{
    g_dbus_method_invocation_return_value (invocation,
                                           g_variant_new ("(ss)",
                                                          nm_logging_level_to_string (),
                                                          nm_logging_domains_to_string ()));
}

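On the client side, the "(ss)" reply built above is typically unpacked with the same borrowing convention that SetLogging uses when reading its parameters: "&s" borrows the string out of the GVariant instead of copying it, so the pointers stay valid only as long as the reply variant is alive. A short illustrative sketch:

/* Illustrative only: unpack a GetLogging reply of type "(ss)". */
static void
example_read_get_logging_reply (GVariant *reply)
{
    const char *level;
    const char *domains;

    g_variant_get (reply, "(&s&s)", &level, &domains);
    /* level/domains point into 'reply'; use them before unreffing it. */
    g_print ("level=%s domains=%s\n", level, domains);
}
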
typedef struct {
    NMManager *self;
    GDBusMethodInvocation *context;
    guint remaining;
} ConnectivityCheckData;

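The remaining counter above is the usual way to fold several per-device asynchronous checks into a single D-Bus reply: each started check increments it, each completed check decrements it, and the pending invocation is answered once it reaches zero. A generic sketch of that pattern (not the actual nm-manager.c code; cleanup is intentionally simplified):

/* Hypothetical aggregation sketch: called once per completed device check,
 * replying only after the last outstanding check has finished. */
static void
example_connectivity_aggregate_done (ConnectivityCheckData *data,
                                     NMConnectivityState final_state)
{
    if (--data->remaining > 0)
        return;
    g_dbus_method_invocation_return_value (data->context,
                                           g_variant_new ("(u)", (guint) final_state));
    /* release data->self and free 'data' with whatever allocator created it */
}
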
static void
|
connectivity: rework async connectivity check requests
An asynchronous request should either be cancellable or not keep
the target object alive. Preferably both.
Otherwise, it is impossible to do a controlled shutdown when terminating
NetworkManager. Currently, when NetworkManager is about to terminate,
it just quits the mainloop and essentially leaks everything. That is a
bug. If we ever want to fix that, every asynchronous request must be
cancellable in a controlled way (or it must not prevent objects from
getting disposed, where disposing the object automatically cancels the
callback).
Rework the asynchronous request for connectivity checks to
- return a handle that can be used to cancel the operation.
Cancelling is optional. The caller may choose to ignore the handle
because the asynchronous operation does not keep the target object
alive. That means it is still possible to shut down, by everybody
giving up their reference to the target object, in which case the
callback will be invoked during dispose() of the target object.
- also, the callback will always be invoked exactly once, and never
synchronously from within the asynchronous start call. During cancel(),
however, the callback is invoked synchronously from within cancel().
Note that it is only allowed to cancel an action at most once, and
never after the callback is invoked (also not from within the callback
itself).
- also, NMConnectivity already supports a fake handler, in case the
connectivity check is disabled via configuration. Hence, reuse
the same code paths also when compiling without --enable-concheck.
That means, instead of having #if WITH_CONCHECK at various callers,
move those checks into NMConnectivity. The downside is that a build
without concheck has a small overhead compared to before. The upside
is that we reuse the same code paths when compiling with or without
concheck.
- also, the patch synchronizes the connectivity states. For example,
previously `nmcli networking connectivity check` would schedule
requests in parallel and return the accumulated result of the individual
requests.
However, the global connectivity state of the manager might not have
been the same as the answer to the explicit connectivity check,
because while the manual check is waiting for all pending checks to
complete, the global connectivity state could already change. That is
just wrong. There are not multiple global connectivity states at the
same time, there is just one. A manual connectivity check should have
the meaning of ensuring that the global state is up to date, but it
should still return the global connectivity state -- not the answers
of several connectivity checks issued in parallel.
This is related to commit b799de281bc01073c31dd2c86171b29c8132441c
(libnm: update property in the manager after connectivity check),
which tries to address a similar problem on the client side.
Similarly, each device has a connectivity state. While there might
be several connectivity checks per device pending, whenever a check
completes, it can update the per-device state (and return that device
state as the result); the immediate answer of the individual check
might not matter. This is especially the case when a later request
returns earlier and obsoletes all earlier requests. In that case,
the earlier requests return with the current device's connectivity
state.
This patch cleans up the internal API and gives better defined behavior
to the user (that is, a simple API which simplifies the implementation
for the caller). However, getting this API right and properly handling
cancellation and destruction of the target object makes the
implementation more complex. But this is not just for the sake of a
nicer API; it fixes actual issues explained above.
Also, get rid of GAsyncResult for tracking information about the pending
request. Instead, allocate our own handle structure, which ends up being
nicer because it is strongly typed and has exactly the properties that
are useful to track the request. It also gets rid of the awkward
_finish() API by passing the relevant arguments to the callback
directly.
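To make the contract above concrete, here is a minimal, self-contained sketch of such a handle-based asynchronous call. All names (DemoHandle, demo_check_start, demo_check_cancel) are invented for illustration and are not the NMDevice API; the point is only the invariants: the start call returns a handle, the callback fires exactly once and never from within start, and cancel() invokes it synchronously.

/* Hypothetical illustration only -- not the NMDevice API. */
#include <glib.h>

typedef void (*DemoDoneFunc) (gboolean cancelled, gpointer user_data);

typedef struct {
    guint idle_id;          /* pending completion source; 0 once the callback ran */
    DemoDoneFunc callback;
    gpointer user_data;
} DemoHandle;

static gboolean
_demo_complete (gpointer data)
{
    DemoHandle *handle = data;

    handle->idle_id = 0;
    handle->callback (FALSE, handle->user_data);   /* exactly-once completion */
    g_slice_free (DemoHandle, handle);
    return G_SOURCE_REMOVE;
}

static DemoHandle *
demo_check_start (DemoDoneFunc callback, gpointer user_data)
{
    DemoHandle *handle = g_slice_new0 (DemoHandle);

    handle->callback = callback;
    handle->user_data = user_data;
    /* defer completion, so the callback never runs from within start() */
    handle->idle_id = g_idle_add (_demo_complete, handle);
    return handle;
}

static void
demo_check_cancel (DemoHandle *handle)
{
    /* cancelling is only valid while the callback has not been invoked yet */
    g_return_if_fail (handle->idle_id != 0);

    g_source_remove (handle->idle_id);
    handle->idle_id = 0;
    handle->callback (TRUE, handle->user_data);    /* invoked synchronously */
    g_slice_free (DemoHandle, handle);
}

A caller that does not care about cancellation can simply discard the returned handle, as described above.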
device_connectivity_done (NMDevice *device,
                          NMDeviceConnectivityHandle *handle,
                          NMConnectivityState state,
                          GError *error,
                          gpointer user_data)
{
    ConnectivityCheckData *data = user_data;
    NMManager *self;
    NMManagerPrivate *priv;
    nm_assert (data);
    nm_assert (data->remaining > 0);
    nm_assert (NM_IS_MANAGER (data->self));
    data->remaining--;
    self = data->self;
    priv = NM_MANAGER_GET_PRIVATE (self);
    if (   data->context
        && (   data->remaining == 0
            || (   state == NM_CONNECTIVITY_FULL
                && priv->connectivity_state == NM_CONNECTIVITY_FULL))) {
        /* despite having a @handle and @state returned by the requests, we always
         * return the current connectivity_state. That is, because the connectivity_state
         * and the answer to the connectivity check shall agree.
         *
         * However, if one of the requests (early) returns full connectivity and agrees with
         * the accumulated connectivity state, we no longer have to wait. The result is set.
         *
         * This also works well, because NMDevice first emits change signals to its own
         * connectivity state, which is then taken into account for the accumulated global
         * state. All this happens, before the callback is invoked. */
        g_dbus_method_invocation_return_value (g_steal_pointer (&data->context),
                                               g_variant_new ("(u)",
                                                              (guint) priv->connectivity_state));
    }
    if (data->remaining == 0) {
        g_object_unref (self);
        g_slice_free (ConnectivityCheckData, data);
    }
}

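Worth noting in the function above: the g_steal_pointer() on data->context is what guarantees the D-Bus request is answered at most once. The first completing check that satisfies the condition takes ownership of the invocation and clears the field, so any later device_connectivity_done() call sees data->context == NULL and skips the reply. A minimal sketch of that idiom, with a made-up reply_once() helper:

#include <gio/gio.h>

/* Hypothetical helper illustrating the steal-pointer idiom used above. */
static void
reply_once (GDBusMethodInvocation **p_context, guint value)
{
    GDBusMethodInvocation *context = g_steal_pointer (p_context);

    if (!context)
        return;   /* somebody already replied; *p_context is now NULL */
    g_dbus_method_invocation_return_value (context,
                                           g_variant_new ("(u)", value));
}
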
static void
check_connectivity_auth_done_cb (NMAuthChain *chain,
                                 GError *auth_error,
                                 GDBusMethodInvocation *context,
                                 gpointer user_data)
{
    NMManager *self = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    GError *error = NULL;
    NMAuthCallResult result;
    ConnectivityCheckData *data;
    NMDevice *device;

    priv->auth_chains = g_slist_remove (priv->auth_chains, chain);

    result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_NETWORK_CONTROL);

    if (auth_error) {
        _LOGD (LOGD_CORE, "CheckConnectivity request failed: %s", auth_error->message);
        error = g_error_new (NM_MANAGER_ERROR,
                             NM_MANAGER_ERROR_PERMISSION_DENIED,
                             "Connectivity check request failed: %s",
                             auth_error->message);
    } else if (result != NM_AUTH_CALL_RESULT_YES) {
        error = g_error_new_literal (NM_MANAGER_ERROR,
                                     NM_MANAGER_ERROR_PERMISSION_DENIED,
                                     "Not authorized to recheck connectivity");
    }
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases a more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example, to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the
element without having access to the link's head
- contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and consumed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something that could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure for the job. No need to stick to a
suboptimal data structure. Refactor it.
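As a rough illustration of the embedded-link idea: the Item type, field names, and the example() function below are invented, and the include path is an assumption; the CList calls are from the c-list API that nm-utils/nm-c-list.h wraps.

#include "c-list.h"   /* assumed include path; NM pulls this in via nm-utils/nm-c-list.h */

typedef struct {
    int   id;
    CList node;   /* the link lives inside the tracked object itself */
} Item;

static void
example (void)
{
    CList head = C_LIST_INIT (head);
    Item a = { .id = 1 };
    Item b = { .id = 2 };
    Item *it;

    c_list_init (&a.node);
    c_list_init (&b.node);

    c_list_link_tail (&head, &a.node);
    c_list_link_tail (&head, &b.node);

    /* O(1) membership test and O(1) unlink, no separate link allocation */
    if (c_list_is_linked (&a.node))
        c_list_unlink (&a.node);

    c_list_for_each_entry (it, &head, node) {
        /* only b remains linked at this point */
        (void) it->id;
    }
}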
    if (error) {
        g_dbus_method_invocation_take_error (context, error);
        goto out;
    }

    data = g_slice_new (ConnectivityCheckData);
    data->self = g_object_ref (self);
    data->context = context;
    data->remaining = 0;

    c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
connectivity: schedule connectivity timers per-device and probe for short outages
It might happen, that connectivitiy is lost only for a moment and
returns soon after. Based on that assumption, when we loose connectivity
we want to have a probe interval where we check for returning
connectivity more frequently.
For that, we handle tracking of the timeouts per-device.
The intervall shall start with 1 seconds, and double the interval time until
the full interval is reached. Actually, due to the implementation, it's unlikely
that we already perform the second check 1 second later. That is because commonly
the first check returns before the one second timeout is reached and bumps the
interval to 2 seconds right away.
Also, we go through extra lengths so that manual connectivity check
delay the periodic checks. By being more smart about that, we can reduce
the number of connectivity checks, but still keeping the promise to
check at least within the requested interval.
The complexity of book keeping the timeouts is remarkable. But I think
it is worth the effort and we should try hard to
- have a connectivity state as accurate as possible. Clearly,
connectivity checking means that we probing, so being more intelligent
about timeout and backoff timers can result in a better connectivity
state. The connectivity state is important because we use it for
the default-route penalty and the GUI indicates bad connectivity.
- be intelligent about avoiding redundant connectivity checks. While
we want to check often to get an accurate connectivity state, we
also want to minimize the number of HTTP requests, in case the
connectivity is established and supposedly stable.
Also, perform connectivity checks in every state of the device.
Even if a device is disconnected, it still might have connectivity,
for example if the user externally adds an IP address on an unmanaged
device.
https://bugzilla.gnome.org/show_bug.cgi?id=792240
2018-02-20 21:41:14 +01:00
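As a rough illustration of the doubling probe interval described above (ProbeState and schedule_next_probe are made-up names for this sketch, not the actual NMDevice code):

#include <glib.h>

typedef struct {
    guint cur_interval_sec;   /* current probe interval, starts at 1 second */
    guint max_interval_sec;   /* the configured full interval */
    guint timeout_id;
} ProbeState;

static gboolean probe_cb (gpointer user_data);

static void
schedule_next_probe (ProbeState *st)
{
    st->timeout_id = g_timeout_add_seconds (st->cur_interval_sec, probe_cb, st);
    /* double the interval until the configured full interval is reached */
    st->cur_interval_sec = MIN (st->cur_interval_sec * 2, st->max_interval_sec);
}

static gboolean
probe_cb (gpointer user_data)
{
    ProbeState *st = user_data;

    /* a real implementation would issue the connectivity check here */
    schedule_next_probe (st);
    return G_SOURCE_REMOVE;
}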
|
|
|
if (nm_device_check_connectivity (device,
|
2018-07-03 19:20:45 +02:00
|
|
|
AF_INET,
|
|
|
|
|
device_connectivity_done,
|
|
|
|
|
data))
|
|
|
|
|
data->remaining++;
|
|
|
|
|
if (nm_device_check_connectivity (device,
|
|
|
|
|
AF_INET6,
|
connectivity: schedule connectivity timers per-device and probe for short outages
It might happen that connectivity is lost only for a moment and
returns soon after. Based on that assumption, when we lose connectivity
we want to have a probe interval where we check for returning
connectivity more frequently.
For that, we handle tracking of the timeouts per-device.
The interval shall start at 1 second, and double the interval time until
the full interval is reached. Actually, due to the implementation, it's unlikely
that we already perform the second check 1 second later. That is because commonly
the first check returns before the one second timeout is reached and bumps the
interval to 2 seconds right away.
Also, we go to extra lengths so that manual connectivity checks
delay the periodic checks. By being smarter about that, we can reduce
the number of connectivity checks, while still keeping the promise to
check at least within the requested interval.
The complexity of bookkeeping the timeouts is remarkable. But I think
it is worth the effort and we should try hard to
- have a connectivity state as accurate as possible. Clearly,
connectivity checking means that we are probing, so being more intelligent
about timeout and backoff timers can result in a better connectivity
state. The connectivity state is important because we use it for
the default-route penalty and the GUI indicates bad connectivity.
- be intelligent about avoiding redundant connectivity checks. While
we want to check often to get an accurate connectivity state, we
also want to minimize the number of HTTP requests, in case the
connectivity is established and supposedly stable.
Also, perform connectivity checks in every state of the device.
Even if a device is disconnected, it still might have connectivity,
for example if the user externally adds an IP address on an unmanaged
device.
https://bugzilla.gnome.org/show_bug.cgi?id=792240
2018-02-20 21:41:14 +01:00
|
|
|
device_connectivity_done,
|
|
|
|
|
data))
|
|
|
|
|
data->remaining++;
|
2013-07-30 16:31:31 -04:00
|
|
|
}
|
|
|
|
|
|
connectivity: rework async connectivity check requests
An asynchronous request should either be cancellable or not keep
the target object alive. Preferably both.
Otherwise, it is impossible to do a controlled shutdown when terminating
NetworkManager. Currently, when NetworkManager is about to terminate,
it just quits the mainloop and essentially leaks everything. That is a
bug. If we ever want to fix that, every asynchronous request must be
cancellable in a controlled way (or it must not prevent objects from
getting disposed, where disposing the object automatically cancels the
callback).
Rework the asynchronous request for connectivity check to
- return a handle that can be used to cancel the operation.
Cancelling is optional. The caller may choose to ignore the handle
because the asynchronous operation does not keep the target object
alive. That means, it is still possible to shutdown, by everybody
giving up their reference to the target object. In which case the
callback will be invoked during dispose() of the target object.
- also, the callback will always be invoked exactly once, and never
synchronously from within the asynchronous start call. But during
cancel(), the callback is invoked synchronously from within cancel().
Note that it's only allowed to cancel an action at most once, and
never after the callback is invoked (also not from within the callback
itself).
- also, NMConnectivity already supports a fake handler, in case
connectivity check is disabled via configuration. Hence, reuse
the same code paths also when compiling without --enable-concheck.
That means, instead of having #if WITH_CONCHECK at various callers,
move them into NMConnectivity. The downside is, that if you build
without concheck, there is a small overhead compared to before. The
upside is, we reuse the same code paths when compiling with or without
concheck.
- also, the patch synchronizes the connectivity states. For example,
previously `nmcli networking connectivity check` would schedule
requests in parallel, and return the accumulated result of the individual
requests.
However, the global connectivity state of the manager might not have
been the same as the answer to the explicit connectivity check,
because while the answer for the manual check is waiting for all
pending checks to complete, the global connectivity state could
already change. That is just wrong. There are not multiple global
connectivity states at the same time, there is just one. A manual
connectivity check should have the meaning of ensuring that the global
state is up to date, but it still should return the global
connectivity state -- not the answers for several connectivity checks
issued in parallel.
This is related to commit b799de281bc01073c31dd2c86171b29c8132441c
(libnm: update property in the manager after connectivity check),
which tries to address a similar problem client side.
Similarly, each device has a connectivity state. While there might
be several connectivity checks per device pending, whenever a check
completes, it can update the per-device state (and return that device
state as result), but the immediate answer of the individual check
might not matter. This is especially the case, when a later request
returns earlier and obsoletes all earlier requests. In that case,
earlier requests return with the result of the current device's
connectivity state.
This patch cleans up the internal API and gives a better-defined behavior
to the user (thus, a simpler API which simplifies things for the
caller). However, the implementation of getting this API right and properly
handling cancellation and destruction of the target object is more complicated.
But this is not just for the sake of a nicer API. This fixes
actual issues explained above.
Also, get rid of GAsyncResult to track information about the pending request.
Instead, allocate our own handle structure, which ends up being nicer
because it's strongly typed and has exactly the properties that are
useful to track the request. Also, it gets rid of the awkward
_finish() API by passing the relevant arguments to the callback
directly.
2018-01-05 17:46:49 +01:00
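The handle-based pattern described above can be sketched roughly as follows (all names are illustrative and do not correspond to the actual NMConnectivity API; the idle source merely stands in for the asynchronous HTTP probe):

#include <glib.h>

typedef struct ConCheckHandle ConCheckHandle;
typedef void (*ConCheckCallback) (ConCheckHandle *handle, int result, gpointer user_data);

struct ConCheckHandle {
    ConCheckCallback callback;
    gpointer         user_data;
    guint            idle_id;
};

static gboolean
_complete_idle (gpointer data)
{
    ConCheckHandle *handle = data;

    handle->idle_id = 0;
    /* the callback is invoked exactly once, never from within the start call */
    handle->callback (handle, 0, handle->user_data);
    g_slice_free (ConCheckHandle, handle);
    return G_SOURCE_REMOVE;
}

ConCheckHandle *
con_check_start (ConCheckCallback callback, gpointer user_data)
{
    ConCheckHandle *handle;

    handle = g_slice_new0 (ConCheckHandle);
    handle->callback = callback;
    handle->user_data = user_data;
    handle->idle_id = g_idle_add (_complete_idle, handle);
    return handle;
}

void
con_check_cancel (ConCheckHandle *handle)
{
    /* cancelling invokes the callback synchronously; it may be called at
     * most once and never after the callback already ran */
    g_source_remove (handle->idle_id);
    handle->callback (handle, -1, handle->user_data);
    g_slice_free (ConCheckHandle, handle);
}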
|
|
|
if (data->remaining == 0) {
|
|
|
|
|
/* call the handler at least once. */
|
|
|
|
|
data->remaining = 1;
|
|
|
|
|
device_connectivity_done (NULL,
|
|
|
|
|
NULL,
|
|
|
|
|
NM_CONNECTIVITY_UNKNOWN,
|
|
|
|
|
NULL,
|
|
|
|
|
data);
|
|
|
|
|
/* @data got destroyed. */
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out:
|
2018-04-09 11:52:55 +02:00
|
|
|
nm_auth_chain_destroy (chain);
|
2013-07-30 16:31:31 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
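A minimal sketch of exporting an object directly on a GDBusConnection, as the rework above does (the real code builds static GDBusInterfaceInfo descriptors by hand; for brevity this sketch parses a small piece of introspection XML, and the interface name and object path are made up):

#include <gio/gio.h>

static const char introspection_xml[] =
    "<node>"
    "  <interface name='org.example.Demo'>"
    "    <method name='Ping'>"
    "      <arg type='s' name='reply' direction='out'/>"
    "    </method>"
    "  </interface>"
    "</node>";

static void
method_call_cb (GDBusConnection *connection,
                const char *sender,
                const char *object_path,
                const char *interface_name,
                const char *method_name,
                GVariant *parameters,
                GDBusMethodInvocation *invocation,
                gpointer user_data)
{
    if (g_strcmp0 (method_name, "Ping") == 0)
        g_dbus_method_invocation_return_value (invocation, g_variant_new ("(s)", "pong"));
}

static const GDBusInterfaceVTable vtable = {
    .method_call = method_call_cb,
};

static GDBusNodeInfo *introspection_data; /* kept alive for the lifetime of the registration */

static guint
export_demo_object (GDBusConnection *connection, GError **error)
{
    if (!introspection_data) {
        introspection_data = g_dbus_node_info_new_for_xml (introspection_xml, error);
        if (!introspection_data)
            return 0;
    }
    /* register the interface and vtable directly on the connection,
     * without GDBusInterfaceSkeleton or GDBusObjectManagerServer */
    return g_dbus_connection_register_object (connection,
                                              "/org/example/Demo",
                                              introspection_data->interfaces[0],
                                              &vtable,
                                              NULL, NULL, error);
}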
|
|
|
impl_manager_check_connectivity (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (obj);
|
2015-04-15 14:53:30 -04:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2013-07-30 16:31:31 -04:00
|
|
|
NMAuthChain *chain;
|
|
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context (invocation, check_connectivity_auth_done_cb, self);
|
2013-07-29 11:53:23 -05:00
|
|
|
if (!chain) {
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal (invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Unable to authenticate request.");
|
2013-07-29 11:53:23 -05:00
|
|
|
return;
|
2013-07-30 16:31:31 -04:00
|
|
|
}
|
2013-07-29 11:53:23 -05:00
|
|
|
|
|
|
|
|
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_NETWORK_CONTROL, TRUE);
|
2013-07-30 16:31:31 -04:00
|
|
|
}
|
|
|
|
|
|
2014-09-17 14:17:30 -05:00
|
|
|
static void
|
|
|
|
|
start_factory (NMDeviceFactory *factory, gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
nm_device_factory_start (factory);
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
gboolean
|
|
|
|
|
nm_manager_write_device_state (NMManager *self, NMDevice *device)
|
2016-09-23 17:36:21 +02:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2018-07-06 21:09:58 +02:00
|
|
|
int ifindex;
|
|
|
|
|
gboolean managed;
|
|
|
|
|
NMConfigDeviceStateManagedType managed_type;
|
|
|
|
|
const char *uuid = NULL;
|
|
|
|
|
const char *perm_hw_addr_fake = NULL;
|
|
|
|
|
gboolean perm_hw_addr_is_fake;
|
|
|
|
|
guint32 route_metric_default_aspired;
|
|
|
|
|
guint32 route_metric_default_effective;
|
all: don't use gchar/gshort/gint/glong but C types
We commonly don't use the glib typedefs for char/short/int/long,
but their C types directly.
$ git grep '\<g\(char\|short\|int\|long\|float\|double\)\>' | wc -l
587
$ git grep '\<\(char\|short\|int\|long\|float\|double\)\>' | wc -l
21114
One could argue that using the glib typedefs is preferable in
public API (of our glib based libnm library) or where it clearly
is related to glib, like during
g_object_set (obj, PROPERTY, (gint) value, NULL);
However, that argument does not seem strong, because in practice we don't
follow that argument today, and seldom use the glib typedefs.
Also, the style guide for this would be hard to formalize, because
"using them where clearly related to a glib" is a very loose suggestion.
Also note that glib typedefs will always just be typedefs of the
underlying C types. There is no danger of glib changing the meaning
of these typedefs (because that would be a major API break of glib).
A simple style guide is instead: don't use these typedefs.
No manual actions, I only ran the bash script:
FILES=($(git ls-files '*.[hc]'))
sed -i \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>\( [^ ]\)/\1\2/g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\> /\1 /g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>/\1/g' \
"${FILES[@]}"
2018-07-11 07:40:19 +02:00
|
|
|
int nm_owned;
|
2018-08-13 18:37:02 +02:00
|
|
|
NMDhcp4Config *dhcp4_config;
|
|
|
|
|
const char *root_path = NULL;
|
2016-09-23 17:36:21 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
ifindex = nm_device_get_ip_ifindex (device);
|
|
|
|
|
if (ifindex <= 0)
|
|
|
|
|
return FALSE;
|
|
|
|
|
if (ifindex == 1) {
|
|
|
|
|
/* ignore loopback */
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2016-09-23 17:36:21 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
if (!nm_platform_link_get (priv->platform, ifindex))
|
|
|
|
|
return FALSE;
|
2016-09-23 17:36:21 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
managed = nm_device_get_managed (device, FALSE);
|
|
|
|
|
if (managed) {
|
2018-09-18 18:14:27 +02:00
|
|
|
NMSettingsConnection *sett_conn = NULL;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
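The delegation this describes can be pictured with a small sketch (hypothetical type and field names; only nm_connection_get_uuid() is real libnm-core API):

#include "nm-connection.h"

typedef struct {
    GObject       parent;      /* still an NMDBusObject in the real code */
    NMConnection *connection;  /* delegate: an NMSimpleConnection instance */
} ExampleSettingsConnection;

static const char *
example_settings_connection_get_uuid (ExampleSettingsConnection *self)
{
    /* delegate to the owned NMConnection instead of implementing the
     * NMConnection interface on the settings object itself */
    return nm_connection_get_uuid (self->connection);
}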
|
|
|
|
2018-07-04 19:19:38 +02:00
|
|
|
if (nm_device_get_state (device) <= NM_DEVICE_STATE_ACTIVATED)
|
|
|
|
|
sett_conn = nm_device_get_settings_connection (device);
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
if (sett_conn)
|
|
|
|
|
uuid = nm_settings_connection_get_uuid (sett_conn);
|
2018-07-06 21:09:58 +02:00
|
|
|
managed_type = NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_MANAGED;
|
|
|
|
|
} else if (nm_device_get_unmanaged_flags (device, NM_UNMANAGED_USER_EXPLICIT))
|
|
|
|
|
managed_type = NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNMANAGED;
|
|
|
|
|
else
|
|
|
|
|
managed_type = NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNKNOWN;
|
2016-09-23 17:36:21 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
perm_hw_addr_fake = nm_device_get_permanent_hw_address_full (device, FALSE, &perm_hw_addr_is_fake);
|
|
|
|
|
if (perm_hw_addr_fake && !perm_hw_addr_is_fake)
|
|
|
|
|
perm_hw_addr_fake = NULL;
|
|
|
|
|
|
|
|
|
|
nm_owned = nm_device_is_software (device) ? nm_device_is_nm_owned (device) : -1;
|
2016-09-23 17:36:21 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
route_metric_default_effective = _device_route_metric_get (self, ifindex, NM_DEVICE_TYPE_UNKNOWN,
|
|
|
|
|
TRUE, &route_metric_default_aspired);
|
2016-10-13 15:56:13 +02:00
|
|
|
|
2018-08-13 18:37:02 +02:00
|
|
|
dhcp4_config = nm_device_get_dhcp4_config (device);
|
|
|
|
|
if (dhcp4_config)
|
|
|
|
|
root_path = nm_dhcp4_config_get_option (dhcp4_config, "root_path");
|
|
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
return nm_config_device_state_write (ifindex,
|
|
|
|
|
managed_type,
|
|
|
|
|
perm_hw_addr_fake,
|
|
|
|
|
uuid,
|
|
|
|
|
nm_owned,
|
|
|
|
|
route_metric_default_aspired,
|
2018-08-13 18:37:02 +02:00
|
|
|
route_metric_default_effective,
|
|
|
|
|
root_path);
|
2018-07-06 21:09:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nm_manager_write_device_state_all (NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
gs_unref_hashtable GHashTable *seen_ifindexes = NULL;
|
|
|
|
|
NMDevice *device;
|
2017-05-31 16:58:21 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
seen_ifindexes = g_hash_table_new (nm_direct_hash, NULL);
|
2017-12-06 15:51:18 +01:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_manager_write_device_state (self, device)) {
|
|
|
|
|
g_hash_table_add (seen_ifindexes,
|
|
|
|
|
GINT_TO_POINTER (nm_device_get_ip_ifindex (device)));
|
|
|
|
|
}
|
2016-09-23 17:36:21 +02:00
|
|
|
}
|
|
|
|
|
|
2017-04-19 15:45:31 +02:00
|
|
|
nm_config_device_state_prune_unseen (seen_ifindexes);
|
2016-09-23 17:36:21 +02:00
|
|
|
}
|
|
|
|
|
|
2017-03-14 10:42:36 +01:00
|
|
|
static gboolean
|
|
|
|
|
devices_inited_cb (gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = user_data;
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
|
|
|
|
priv->devices_inited_id = 0;
|
|
|
|
|
priv->devices_inited = TRUE;
|
|
|
|
|
check_if_startup_complete (self);
|
|
|
|
|
return G_SOURCE_REMOVE;
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-03 09:26:31 -04:00
|
|
|
gboolean
|
|
|
|
|
nm_manager_start (NMManager *self, GError **error)
|
2009-06-11 00:39:12 -04:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2017-02-03 14:15:16 +01:00
|
|
|
gs_free NMSettingsConnection **connections = NULL;
|
2009-12-23 00:03:45 -08:00
|
|
|
guint i;
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2015-08-03 09:26:31 -04:00
|
|
|
if (!nm_settings_start (priv->settings, error))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2009-12-23 00:03:45 -08:00
|
|
|
/* Set initial radio enabled/disabled state */
|
|
|
|
|
for (i = 0; i < RFKILL_TYPE_MAX; i++) {
|
|
|
|
|
RadioState *rstate = &priv->radio_states[i];
|
2011-04-13 21:58:25 -05:00
|
|
|
gboolean enabled;
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2009-12-23 00:03:45 -08:00
|
|
|
if (!rstate->desc)
|
|
|
|
|
continue;
|
|
|
|
|
|
2014-04-15 16:25:39 -05:00
|
|
|
/* recheck kernel rfkill state */
|
|
|
|
|
update_rstate_from_rfkill (priv->rfkill_mgr, rstate);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
if (rstate->desc) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOGI (LOGD_RFKILL, "rfkill: %s %s by radio killswitch; %s by state file",
|
2016-03-02 11:38:26 +01:00
|
|
|
rstate->desc,
|
|
|
|
|
(rstate->hw_enabled && rstate->sw_enabled) ? "enabled" : "disabled",
|
|
|
|
|
rstate->user_enabled ? "enabled" : "disabled");
|
2010-09-01 17:08:10 -05:00
|
|
|
}
|
2011-04-13 21:58:25 -05:00
|
|
|
enabled = radio_enabled_for_rstate (rstate, TRUE);
|
|
|
|
|
manager_update_radio_enabled (self, rstate, enabled);
|
2009-12-23 00:03:45 -08:00
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2010-05-22 08:55:30 -07:00
|
|
|
/* Log overall networking status - enabled/disabled */
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGI (LOGD_CORE, "Networking is %s by state file",
|
|
|
|
|
priv->net_enabled ? "enabled" : "disabled");
|
2010-04-08 18:23:43 -07:00
|
|
|
|
2010-10-27 15:47:10 -05:00
|
|
|
system_unmanaged_devices_changed_cb (priv->settings, NULL, self);
|
2017-04-23 14:20:37 +02:00
|
|
|
hostname_changed_cb (priv->hostname_manager, NULL, self);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2014-09-05 14:48:21 -05:00
|
|
|
/* Start device factories */
|
core: better order the code at startup
NM was calling nm_bus_manager_start_service() to claim its bus name
before it exported any of its objects, but this didn't matter under
dbus-glib, because no client connections would be accepted until the
main loop was started later on, by which point we would have exported
everything.
But with gdbus, method calls are initially received in the gdbus
worker thread, which means that clients would be able to connect right
away and then be told that the expected interfaces don't exist.
So move the nm_bus_manager_start_service() call to occur after
creating NMSettings and NMManager (and, indirectly, NMAgentManager).
This requires splitting out the slow parts of nm_settings_new() into a
new nm_settings_start(), so that we can create and export it first,
and then read the connections, etc afterward. (Likewise, there were
still a few potentially-slow bits in nm_manager_new() which are now
moved into nm_manager_start().)
2015-07-31 13:00:22 -04:00
|
|
|
nm_device_factory_manager_load_factories (_register_device_factory, self);
|
2014-09-17 14:17:30 -05:00
|
|
|
nm_device_factory_manager_for_each_factory (start_factory, NULL);
|
2014-09-05 14:48:21 -05:00
|
|
|
|
2017-09-29 15:11:33 +02:00
|
|
|
nm_platform_process_events (priv->platform);
|
2017-03-14 12:14:21 +01:00
|
|
|
|
2017-09-29 15:11:33 +02:00
|
|
|
g_signal_connect (priv->platform,
|
2017-03-14 12:14:21 +01:00
|
|
|
NM_PLATFORM_SIGNAL_LINK_CHANGED,
|
|
|
|
|
G_CALLBACK (platform_link_cb),
|
|
|
|
|
self);
|
|
|
|
|
|
2015-05-04 16:54:51 +02:00
|
|
|
platform_query_devices (self);
|
2011-10-18 13:48:44 +02:00
|
|
|
|
core: better order the code at startup
NM was calling nm_bus_manager_start_service() to claim its bus name
before it exported any of its objects, but this didn't matter under
dbus-glib, because no client connections would be accepted until the
main loop was started later on, by which point we would have exported
everything.
But with gdbus, method calls are initially received in the gdbus
worker thread, which means that clients would be able to connect right
away and then be told that the expected interfaces don't exist.
So move the nm_bus_manager_start_service() call to occur after
creating NMSettings and NMManager (and, indirectly, NMAgentManager).
This requires splitting out the slow parts of nm_settings_new() into a
new nm_settings_start(), so that we can create and export it first,
and then read the connections, etc afterward. (Likewise, there were
still a few potentially-slow bits in nm_manager_new() which are now
moved into nm_manager_start().)
2015-07-31 13:00:22 -04:00
|
|
|
/* Load VPN plugins */
|
|
|
|
|
priv->vpn_manager = g_object_ref (nm_vpn_manager_get ());
|
|
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
/* Connections added before the manager is started do not emit
|
2011-10-18 13:48:44 +02:00
|
|
|
* connection-added signals thus devices have to be created manually.
|
|
|
|
|
*/
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD (LOGD_CORE, "creating virtual devices...");
|
2017-11-22 12:46:58 +01:00
|
|
|
connections = nm_settings_get_connections_clone (priv->settings, NULL,
|
|
|
|
|
NULL, NULL,
|
|
|
|
|
nm_settings_connection_cmp_autoconnect_priority_p_with_data, NULL);
|
2017-02-03 14:15:16 +01:00
|
|
|
for (i = 0; connections[i]; i++)
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
connection_changed (self, connections[i]);
|
core: only manage those bridges created by NetworkManager (rh #905035)
Until we handle bridges non-destructively, only manage bridges
created by NM. When quitting write out a file listing all
bridges created by NM and a timestamp, and when starting read
that file and if the timestamp is within 30 minutes, manage
any bridge that was listed in that file. This scheme, while
not foolproof (eg, if NM crashes), should ensure that NM can
recognize bridges it created if it's restarted. The file
is stored in /run or /var/run, which is cleaned each restart,
ensuring that the state does not persist across reboots.
If an automatic or user-initiated activation request for
a bridge NM does not manage is received, that request is
denied. Only if the bridge interface does not yet exist, or
was present in the managed bridges file, will an
NMDeviceBridge be created and activation be possible.
2013-02-01 18:03:11 -06:00
|
|
|
|
2017-03-14 10:42:36 +01:00
|
|
|
nm_clear_g_source (&priv->devices_inited_id);
|
|
|
|
|
priv->devices_inited_id = g_idle_add_full (G_PRIORITY_LOW + 10, devices_inited_cb, self, NULL);
|
2015-08-03 09:26:31 -04:00
|
|
|
|
|
|
|
|
return TRUE;
|
2009-06-11 00:39:12 -04:00
|
|
|
}
|
|
|
|
|
|
2014-10-29 09:12:18 -05:00
|
|
|
void
|
|
|
|
|
nm_manager_stop (NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
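A minimal illustration of the embedded-CList pattern (Item and head are made up; the c_list_* calls are the c-list API that NetworkManager bundles, and the include path may differ depending on how c-list is vendored):

#include <glib.h>
#include "c-list/src/c-list.h"

typedef struct {
    int   ifindex;
    CList lst;                 /* link node embedded directly in the tracked object */
} Item;

static CList head = C_LIST_INIT (head);

static void
example (void)
{
    Item a = { .ifindex = 1 };
    Item *iter;

    c_list_init (&a.lst);
    c_list_link_tail (&head, &a.lst);   /* O(1) insert, no extra slice allocation */

    c_list_for_each_entry (iter, &head, lst)
        g_print ("ifindex %d\n", iter->ifindex);

    c_list_unlink (&a.lst);             /* O(1) remove without access to the head */
}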
|
|
|
NMDevice *device;
|
2014-10-29 09:12:18 -05:00
|
|
|
|
2018-04-18 10:23:22 +02:00
|
|
|
/* FIXME(shutdown): we don't do a proper shutdown yet:
|
|
|
|
|
* - need to ensure that all pending async operations are cancelled
|
|
|
|
|
* - e.g. operations in priv->async_op_lst_head
|
|
|
|
|
* - need to ensure that no more asynchronous requests are started,
|
|
|
|
|
* or that they complete quickly, or that they fail quickly.
|
|
|
|
|
* - note that cancelling some operations is not possible synchronously.
|
|
|
|
|
* Hence, stop() only prepares shutdown and tells everybody to not
|
|
|
|
|
* accept new work, and to complete in a timely manner.
|
|
|
|
|
* We need to still iterate the mainloop for a bit, to give everybody
|
|
|
|
|
* the chance to complete.
|
|
|
|
|
* - e.g. see comment at nm_auth_manager_force_shutdown()
|
|
|
|
|
*/
|
|
|
|
|
|
core/dbus: stop NMDBusManager and reject future method calls
During shutdown, we will need to still iterate the main loop
to do a coordinated shutdown. Currently we do not, and we just
exit, leaving a lot of objects hanging.
If we are going to fix that, we need during shutdown tell
NMDBusManager to reject all future operations.
Note that property getters and the "GetManagedObjects" call are not
blocked. They continue to work.
Certainly for some operations, we want to allow them to be called even
during shutdown. However, these have to opt-in.
This also fixes an ugliness, where nm_dbus_manager_start() would
get the set-property-handler and the @manager as user-data. However,
NMDBusManager will always outlive NMManager, hence, after NMManager
is destroyed, the user-data would be a dangling pointer. Currently
that is not an issue, because
- we always leak NMManager
- we don't run the mainloop during shutdown
2018-04-21 13:25:57 +02:00
|
|
|
nm_dbus_manager_stop (nm_dbus_object_get_manager (NM_DBUS_OBJECT (self)));
|
|
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
while ((device = c_list_first_entry (&priv->devices_lst_head, NMDevice, devices_lst)))
|
|
|
|
|
remove_device (self, device, TRUE, TRUE);
|
2016-01-22 15:19:06 +01:00
|
|
|
|
|
|
|
|
_active_connection_cleanup (self);
|
2017-03-14 10:42:36 +01:00
|
|
|
|
|
|
|
|
nm_clear_g_source (&priv->devices_inited_id);
|
2014-10-29 09:12:18 -05:00
|
|
|
}
|
|
|
|
|
|
2010-07-01 10:32:11 -07:00
|
|
|
static gboolean
|
|
|
|
|
handle_firmware_changed (gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
NMDevice *device;
|
2010-07-01 10:32:11 -07:00
|
|
|
|
|
|
|
|
priv->fw_changed_id = 0;
|
|
|
|
|
|
|
|
|
|
/* Try to re-enable devices with missing firmware */
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
NMDeviceState state = nm_device_get_state (device);
|
2010-07-01 10:32:11 -07:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
if ( nm_device_get_firmware_missing (device)
|
2010-07-01 10:32:11 -07:00
|
|
|
&& (state == NM_DEVICE_STATE_UNAVAILABLE)) {
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
            _LOG2I (LOGD_CORE, device, "firmware may now be available");

            /* Re-set unavailable state to try bringing the device up again */
            nm_device_state_changed (device,
                                     NM_DEVICE_STATE_UNAVAILABLE,
                                     NM_DEVICE_STATE_REASON_NONE);
        }
    }

    return FALSE;
}

static void
firmware_dir_changed (GFileMonitor *monitor,
                      GFile *file,
                      GFile *other_file,
                      GFileMonitorEvent event_type,
                      gpointer user_data)
{
    NMManager *self = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);

    switch (event_type) {
    case G_FILE_MONITOR_EVENT_CREATED:
    case G_FILE_MONITOR_EVENT_CHANGED:
    case G_FILE_MONITOR_EVENT_MOVED:
    case G_FILE_MONITOR_EVENT_ATTRIBUTE_CHANGED:
    case G_FILE_MONITOR_EVENT_CHANGES_DONE_HINT:
        if (!priv->fw_changed_id) {
            priv->fw_changed_id = g_timeout_add_seconds (4, handle_firmware_changed, self);
            _LOGI (LOGD_CORE, "kernel firmware directory '%s' changed",
                   KERNEL_FIRMWARE_DIR);
        }
        break;
    default:
        break;
    }
}

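The handler above coalesces bursts of file-monitor events: the first event arms a single 4-second timeout, further events are ignored while it is pending, and handle_firmware_changed() (which returns FALSE above) lets the source go away so the next burst can re-arm it. A stripped-down sketch of that pattern with plain GLib, using made-up names, might look like this:

#include <glib.h>

static guint fw_changed_id;

static gboolean
on_settled (gpointer user_data)
{
    fw_changed_id = 0;            /* allow the next burst of events to re-arm the timer */
    g_print ("firmware directory settled, re-checking devices\n");
    return G_SOURCE_REMOVE;       /* one-shot timeout */
}

static void
on_monitor_event (void)
{
    if (!fw_changed_id)
        fw_changed_id = g_timeout_add_seconds (4, on_settled, NULL);
}
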
static void
connection_metered_changed (GObject *object,
                            NMMetered metered,
                            gpointer user_data)
{
    nm_manager_update_metered (NM_MANAGER (user_data));
}

static void
policy_default_ac_changed (GObject *object, GParamSpec *pspec, gpointer user_data)
{
    NMManager *self = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMActiveConnection *ac;

    /* Note: this assumes that it's not possible for the IP4 default
     * route to be going over the default-ip6-device. If that changes,
     * we need something more complicated here.
     */
    ac = nm_policy_get_default_ip4_ac (priv->policy);
    if (!ac)
        ac = nm_policy_get_default_ip6_ac (priv->policy);

    if (ac != priv->primary_connection) {
        if (priv->primary_connection) {
            g_signal_handlers_disconnect_by_func (priv->primary_connection,
                                                  G_CALLBACK (connection_metered_changed),
                                                  self);
            g_clear_object (&priv->primary_connection);
        }

        priv->primary_connection = ac ? g_object_ref (ac) : NULL;

        if (priv->primary_connection) {
            g_signal_connect (priv->primary_connection,
                              NM_ACTIVE_CONNECTION_DEVICE_METERED_CHANGED,
                              G_CALLBACK (connection_metered_changed), self);
        }

        _LOGD (LOGD_CORE, "PrimaryConnection now %s",
               ac ? nm_active_connection_get_settings_connection_id (ac) : "(none)");
        _notify (self, PROP_PRIMARY_CONNECTION);
        _notify (self, PROP_PRIMARY_CONNECTION_TYPE);
        nm_manager_update_metered (self);
    }
}

static void
policy_activating_ac_changed (GObject *object, GParamSpec *pspec, gpointer user_data)
{
    NMManager *self = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMActiveConnection *activating, *best;

    /* We only look at activating-ip6-ac if activating-ip4-ac
     * AND default-ip4-ac are NULL; if default-ip4-ac is
     * non-NULL, then activating-ip6-ac is irrelevant, since while
     * that AC might become the new default-ip6-ac, it can't
     * become primary-connection while default-ip4-ac is set to
     * something else.
     */
    activating = nm_policy_get_activating_ip4_ac (priv->policy);
    best = nm_policy_get_default_ip4_ac (priv->policy);
    if (!activating && !best)
        activating = nm_policy_get_activating_ip6_ac (priv->policy);

    if (nm_g_object_ref_set (&priv->activating_connection, activating)) {
        _LOGD (LOGD_CORE, "ActivatingConnection now %s",
               activating
                   ? nm_active_connection_get_settings_connection_id (activating)
                   : "(none)");
        _notify (self, PROP_ACTIVATING_CONNECTION);
    }
}

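nm_g_object_ref_set() above swaps the tracked object and reports whether anything actually changed, so the log message and property notification only fire on a real transition. A sketch of what such a helper does (illustrative only, not the actual NetworkManager implementation):

#include <glib-object.h>

static gboolean
object_ref_set (GObject **pp, GObject *obj)
{
    GObject *old;

    if (*pp == obj)
        return FALSE;              /* nothing changed, caller can skip notifications */

    old = *pp;
    *pp = obj ? g_object_ref (obj) : NULL;
    if (old)
        g_object_unref (old);
    return TRUE;
}
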
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
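Since PropertiesChanged is now emitted from dispatch_properties_changed(), a group of related property updates can be batched with the standard GObject freeze/thaw mechanism, as the commit message suggests. A small sketch (the property names are made up):

#include <glib-object.h>

static void
update_connectivity_state (GObject *obj)
{
    g_object_freeze_notify (obj);          /* queue notify emissions */
    g_object_notify (obj, "state");        /* made-up property names */
    g_object_notify (obj, "connectivity");
    g_object_thaw_notify (obj);            /* queued pspecs reach dispatch_properties_changed() together */
}
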
/*****************************************************************************/

typedef struct {
    NMManager *self;
    NMDBusObject *obj;
    const NMDBusInterfaceInfoExtended *interface_info;
    const NMDBusPropertyInfoExtended *property_info;
    GVariant *value;
    guint64 export_version_id;
} DBusSetPropertyHandle;

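DBusSetPropertyHandle carries everything the asynchronous authorization needs, including export_version_id: a snapshot that later tells the callback whether the object was unexported or replaced while the caller was being authorized. Reduced to its core, and with made-up names (this is only a sketch of the pattern, not the NetworkManager implementation), the idea looks roughly like this:

#include <glib.h>

typedef struct {
    guint64 generation;                 /* bumped whenever the object is (re-)exported */
} ExportedObject;

typedef struct {
    ExportedObject *obj;                /* assumed to be kept alive for the duration of the call */
    guint64 snapshot;
} PendingSetProperty;

static void
pending_set_property_done (PendingSetProperty *call, gboolean authorized)
{
    if (call->snapshot != call->obj->generation) {
        /* the object changed identity while we waited: drop the stale result */
        return;
    }
    if (authorized) {
        /* ... apply the new property value ... */
    }
}
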
#define NM_PERM_DENIED_ERROR "org.freedesktop.NetworkManager.PermissionDenied"
static void
_dbus_set_property_auth_cb (NMAuthChain *chain,
                            GError *error,
                            GDBusMethodInvocation *invocation,
                            gpointer user_data)
{
    DBusSetPropertyHandle *handle_data = user_data;
    gs_unref_object NMDBusObject *obj = handle_data->obj;
    const NMDBusInterfaceInfoExtended *interface_info = handle_data->interface_info;
    const NMDBusPropertyInfoExtended *property_info = handle_data->property_info;
    gs_unref_variant GVariant *value = handle_data->value;
    guint64 export_version_id = handle_data->export_version_id;
    gs_unref_object NMManager *self = handle_data->self;
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMAuthCallResult result;
    gs_free_error GError *local = NULL;
    const char *error_name = NULL;
    const char *error_message = NULL;
    GValue gvalue;

    g_slice_free (DBusSetPropertyHandle, handle_data);

    priv->auth_chains = g_slist_remove (priv->auth_chains, chain);
    result = nm_auth_chain_get_result (chain, property_info->writable.permission);

    if (   error
        || result != NM_AUTH_CALL_RESULT_YES) {
        error_name = NM_PERM_DENIED_ERROR;
        error_message = error ? error->message : "Not authorized to perform this operation";
        goto out;
    }

    if (export_version_id != nm_dbus_object_get_export_version_id (obj)) {
        error_name = "org.freedesktop.DBus.Error.UnknownObject";
        error_message = "Object was deleted while authenticating";
        goto out;
    }

	/* Handle some properties specially *sigh*: a global DNS configuration
	 * coming from NetworkManager.conf takes precedence and may not be
	 * overwritten via D-Bus. */
	if (   interface_info == &interface_info_manager
	    && nm_streq (property_info->property_name, NM_MANAGER_GLOBAL_DNS_CONFIGURATION)) {
		const NMGlobalDnsConfig *global_dns;

		global_dns = nm_config_data_get_global_dns_config (nm_config_get_data (priv->config));
		if (   global_dns
		    && !nm_global_dns_config_is_internal (global_dns)) {
			error_name = NM_PERM_DENIED_ERROR;
			error_message = "Global DNS configuration already set via configuration file";
			goto out;
		}
	}
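	/* Generic case: convert the D-Bus variant to a GValue and set the
	 * matching GObject property by name. A failure is reported back to the
	 * caller as org.freedesktop.DBus.Error.InvalidArgs. */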
g_dbus_gvariant_to_gvalue (value, &gvalue);
|
2018-11-22 14:39:22 +01:00
|
|
|
if (!nm_g_object_set_property (G_OBJECT (obj), property_info->property_name, &gvalue, &local)) {
|
|
|
|
|
error_name = "org.freedesktop.DBus.Error.InvalidArgs";
|
|
|
|
|
error_message = local->message;
|
|
|
|
|
}
g_value_unset (&gvalue);
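	/* In all cases, record the outcome in the audit log and complete the
	 * pending D-Bus invocation. */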
out:
	nm_audit_log_control_op (property_info->writable.audit_op,
	                         property_info->property_name,
	                         !error_message,
	                         nm_auth_chain_get_subject (chain),
	                         error_message);
	if (error_message)
		g_dbus_method_invocation_return_dbus_error (invocation, error_name, error_message);
	else
		g_dbus_method_invocation_return_value (invocation, NULL);

	nm_auth_chain_destroy (chain);
}
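/*
 * nm_manager_dbus_set_property_handle:
 *
 * Handler for setting a writable D-Bus property on an exported object. The
 * value is not applied immediately: the caller is first authorized
 * asynchronously against the permission declared for the property
 * (property_info->writable.permission), and the actual assignment plus the
 * D-Bus reply happen in _dbus_set_property_auth_cb() above.
 */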
void
nm_manager_dbus_set_property_handle (NMDBusObject *obj,
                                     const NMDBusInterfaceInfoExtended *interface_info,
                                     const NMDBusPropertyInfoExtended *property_info,
                                     GDBusConnection *connection,
                                     const char *sender,
                                     GDBusMethodInvocation *invocation,
                                     GVariant *value,
                                     gpointer user_data)
{
	NMManager *self = user_data;
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMAuthChain *chain;
	const char *error_message = NULL;
	gs_unref_object NMAuthSubject *subject = NULL;
	DBusSetPropertyHandle *handle_data;
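	/* Identify the requesting D-Bus peer; without a subject the request
	 * cannot be authorized. */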
	subject = nm_auth_subject_new_unix_process_from_context (invocation);
	if (!subject) {
		error_message = "Could not determine request UID";
		goto err;
	}
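	/* Authorization is asynchronous, so keep references to everything the
	 * callback needs in a small handle. The export version id lets the
	 * callback detect that the object was unexported while the request was
	 * still being authenticated (see "Object was deleted while
	 * authenticating" above). */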
	handle_data = g_slice_new0 (DBusSetPropertyHandle);
	handle_data->self = g_object_ref (self);
	handle_data->obj = g_object_ref (obj);
	handle_data->interface_info = interface_info;
	handle_data->property_info = property_info;
	handle_data->value = g_variant_ref (value);
	handle_data->export_version_id = nm_dbus_object_get_export_version_id (obj);
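	/* Start the asynchronous authorization (polkit) for the permission that
	 * guards this property; _dbus_set_property_auth_cb() picks up the result. */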
	chain = nm_auth_chain_new_subject (subject, invocation, _dbus_set_property_auth_cb, handle_data);
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because of?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower-layer GDBusConnection API
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net increase of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (as was done previously). Also, a restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering; the generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue.
We no longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify() (see the sketch after
this commit message). We probably should make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes under similar
conditions doesn't show a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
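To make the freeze/thaw grouping from the commit message above concrete, here is a minimal standalone sketch (not NetworkManager code; the DemoObj type and its "state"/"reason" properties are made up for illustration). While the notify queue is frozen, GObjectClass.dispatch_properties_changed() is invoked once with all pending properties, which is exactly the point where a single combined PropertiesChanged could be emitted:

/* demo-freeze-thaw.c -- standalone sketch, not NetworkManager code.
 * Build: gcc demo-freeze-thaw.c $(pkg-config --cflags --libs gobject-2.0)
 */
#include <glib-object.h>

#define DEMO_TYPE_OBJ (demo_obj_get_type ())
G_DECLARE_FINAL_TYPE (DemoObj, demo_obj, DEMO, OBJ, GObject)

struct _DemoObj { GObject parent; guint state; guint reason; };
G_DEFINE_TYPE (DemoObj, demo_obj, G_TYPE_OBJECT)

enum { PROP_0, PROP_STATE, PROP_REASON };

static void
set_property (GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec)
{
	DemoObj *self = DEMO_OBJ (object);

	if (prop_id == PROP_STATE)
		self->state = g_value_get_uint (value);
	else if (prop_id == PROP_REASON)
		self->reason = g_value_get_uint (value);
}

static void
get_property (GObject *object, guint prop_id, GValue *value, GParamSpec *pspec)
{
	DemoObj *self = DEMO_OBJ (object);

	if (prop_id == PROP_STATE)
		g_value_set_uint (value, self->state);
	else if (prop_id == PROP_REASON)
		g_value_set_uint (value, self->reason);
}

static void
dispatch_properties_changed (GObject *object, guint n_pspecs, GParamSpec **pspecs)
{
	/* Called once per batch of queued notifications. An exported object
	 * would emit one PropertiesChanged covering all n_pspecs right here. */
	g_print ("dispatching %u property change(s)\n", n_pspecs);
	G_OBJECT_CLASS (demo_obj_parent_class)->dispatch_properties_changed (object, n_pspecs, pspecs);
}

static void
demo_obj_init (DemoObj *self)
{
}

static void
demo_obj_class_init (DemoObjClass *klass)
{
	GObjectClass *object_class = G_OBJECT_CLASS (klass);

	object_class->set_property = set_property;
	object_class->get_property = get_property;
	object_class->dispatch_properties_changed = dispatch_properties_changed;

	g_object_class_install_property (object_class, PROP_STATE,
	    g_param_spec_uint ("state", "", "", 0, G_MAXUINT, 0, G_PARAM_READWRITE));
	g_object_class_install_property (object_class, PROP_REASON,
	    g_param_spec_uint ("reason", "", "", 0, G_MAXUINT, 0, G_PARAM_READWRITE));
}

int
main (void)
{
	DemoObj *obj = g_object_new (DEMO_TYPE_OBJ, NULL);

	/* Not frozen: each set dispatches its notification on its own. */
	g_object_set (obj, "state", 100u, NULL);
	g_object_set (obj, "reason", 1u, NULL);

	/* Frozen: both changes are queued and delivered in one dispatch,
	 * i.e. one combined PropertiesChanged for the whole group. */
	g_object_freeze_notify (G_OBJECT (obj));
	g_object_set (obj, "state", 30u, NULL);
	g_object_set (obj, "reason", 2u, NULL);
	g_object_thaw_notify (G_OBJECT (obj));

	g_object_unref (obj);
	return 0;
}

Running it should print two single-property dispatches followed by one dispatch covering both properties.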
2018-02-26 13:51:52 +01:00
|
|
|
nm_auth_chain_add_call (chain, property_info->writable.permission, TRUE);
|
|
|
|
|
return;
|
2015-08-20 14:25:46 +02:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
err:
|
|
|
|
|
nm_audit_log_control_op (property_info->writable.audit_op,
|
|
|
|
|
property_info->property_name,
|
|
|
|
|
FALSE,
|
|
|
|
|
invocation,
|
|
|
|
|
error_message);
|
|
|
|
|
g_dbus_method_invocation_return_error_literal (invocation,
|
|
|
|
|
G_DBUS_ERROR,
|
|
|
|
|
G_DBUS_ERROR_AUTH_FAILED,
|
|
|
|
|
error_message);
|
2015-08-20 14:25:46 +02:00
|
|
|
}
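For context, the error path above is what an unauthorized caller hits when writing one of the manager's D-Bus properties. A minimal client-side sketch of such a write, using the standard org.freedesktop.DBus.Properties.Set method (not code from this file; WirelessEnabled is just an example of a writable property):

/* props-set.c -- standalone sketch, not NetworkManager code.
 * Build: gcc props-set.c $(pkg-config --cflags --libs gio-2.0)
 */
#include <gio/gio.h>

int
main (void)
{
	GDBusConnection *bus;
	GVariant *ret;
	GError *error = NULL;

	bus = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, &error);
	if (!bus) {
		g_printerr ("cannot connect to the system bus: %s\n", error->message);
		g_clear_error (&error);
		return 1;
	}

	/* Write a property on the manager object via the standard
	 * org.freedesktop.DBus.Properties interface. An unauthorized caller
	 * ends up in the err: path above and receives G_DBUS_ERROR_AUTH_FAILED. */
	ret = g_dbus_connection_call_sync (bus,
	                                   "org.freedesktop.NetworkManager",
	                                   "/org/freedesktop/NetworkManager",
	                                   "org.freedesktop.DBus.Properties",
	                                   "Set",
	                                   g_variant_new ("(ssv)",
	                                                  "org.freedesktop.NetworkManager",
	                                                  "WirelessEnabled",
	                                                  g_variant_new_boolean (TRUE)),
	                                   NULL,
	                                   G_DBUS_CALL_FLAGS_NONE,
	                                   -1, NULL, &error);
	if (!ret) {
		g_printerr ("Set failed: %s\n", error->message);
		g_clear_error (&error);
	} else
		g_variant_unref (ret);

	g_object_unref (bus);
	return 0;
}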
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-08-20 14:25:46 +02:00
|
|
|
|
2016-07-01 12:11:01 +02:00
|
|
|
static NMCheckpointManager *
|
|
|
|
|
_checkpoint_mgr_get (NMManager *self, gboolean create_as_needed)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
|
|
|
|
|
|
|
|
|
if (G_UNLIKELY (!priv->checkpoint_mgr) && create_as_needed)
|
2017-10-21 16:05:14 +02:00
|
|
|
priv->checkpoint_mgr = nm_checkpoint_manager_new (self, obj_properties[PROP_CHECKPOINTS]);
|
2016-07-01 12:11:01 +02:00
|
|
|
return priv->checkpoint_mgr;
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-01 17:57:13 +02:00
|
|
|
static void
|
|
|
|
|
checkpoint_auth_done_cb (NMAuthChain *chain,
|
|
|
|
|
GError *auth_error,
|
|
|
|
|
GDBusMethodInvocation *context,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2016-08-01 17:19:14 +02:00
|
|
|
char *op, *checkpoint_path = NULL, **devices;
|
2016-08-01 17:57:13 +02:00
|
|
|
NMCheckpoint *checkpoint;
|
|
|
|
|
NMAuthCallResult result;
|
|
|
|
|
guint32 timeout, flags;
|
|
|
|
|
GVariant *variant = NULL;
|
|
|
|
|
GError *error = NULL;
|
2016-08-01 17:19:14 +02:00
|
|
|
const char *arg = NULL;
|
checkpoint: allow resetting the rollback timeout via D-Bus
This allows adjusting the timeout of an existing checkpoint.
The main use case of checkpoints is to have a fail-safe when
configuring the network remotely. By allowing the timeout to be reset,
the user can perform a series of actions and keep bumping the
timeout. That way, the entire series is still guarded by the same
checkpoint, but the user can start with a short timeout and
re-adjust it as they go along.
The libnm API only implements the async form (at least for now).
Sync methods are fundamentally wrong with D-Bus, and a sync variant
is probably not needed. Also, follow the glib convention where the
async form doesn't have an _async name suffix. Also, accept a D-Bus path
as argument, not an NMCheckpoint instance. The libnm API should
not be more restricted than the underlying D-Bus API. It would
be cumbersome to require the user to look up the NMCheckpoint
instance first, especially since libnm doesn't provide an efficient
or convenient lookup-by-path method. On the other hand, retrieving
the path from an NMCheckpoint instance is always possible.
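A minimal client-side sketch of the call described above, assuming the method is exposed on org.freedesktop.NetworkManager as CheckpointAdjustRollbackTimeout(o checkpoint, u add_timeout) and using plain GDBusConnection (not code from this file):

/* Sketch: bump/reset the rollback timeout of an existing checkpoint. The
 * object path would have been returned earlier by CheckpointCreate. */
#include <gio/gio.h>

static gboolean
bump_checkpoint_timeout (GDBusConnection *bus,
                         const char      *checkpoint_path,
                         guint32          add_timeout,
                         GError         **error)
{
	GVariant *ret;

	ret = g_dbus_connection_call_sync (bus,
	                                   "org.freedesktop.NetworkManager",
	                                   "/org/freedesktop/NetworkManager",
	                                   "org.freedesktop.NetworkManager",
	                                   "CheckpointAdjustRollbackTimeout",
	                                   g_variant_new ("(ou)", checkpoint_path, add_timeout),
	                                   NULL,
	                                   G_DBUS_CALL_FLAGS_NONE,
	                                   -1, NULL, error);
	if (!ret)
		return FALSE;
	g_variant_unref (ret);
	return TRUE;
}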
2018-03-28 08:09:56 +02:00
|
|
|
guint32 add_timeout;
|
2016-08-01 17:57:13 +02:00
|
|
|
|
2016-08-01 17:19:14 +02:00
|
|
|
op = nm_auth_chain_get_data (chain, "audit-op");
|
2016-08-01 17:57:13 +02:00
|
|
|
priv->auth_chains = g_slist_remove (priv->auth_chains, chain);
|
|
|
|
|
result = nm_auth_chain_get_result (chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK);
|
|
|
|
|
|
2018-03-28 08:09:56 +02:00
|
|
|
if (NM_IN_STRSET (op, NM_AUDIT_OP_CHECKPOINT_DESTROY,
|
|
|
|
|
NM_AUDIT_OP_CHECKPOINT_ROLLBACK,
|
|
|
|
|
NM_AUDIT_OP_CHECKPOINT_ADJUST_ROLLBACK_TIMEOUT))
|
2016-08-01 17:19:14 +02:00
|
|
|
arg = checkpoint_path = nm_auth_chain_get_data (chain, "checkpoint_path");
|
|
|
|
|
|
2016-08-01 17:57:13 +02:00
|
|
|
if (auth_error) {
|
|
|
|
|
error = g_error_new (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"checkpoint check request failed: %s",
|
|
|
|
|
auth_error->message);
|
|
|
|
|
} else if (result != NM_AUTH_CALL_RESULT_YES) {
|
|
|
|
|
error = g_error_new_literal (NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Not authorized to checkpoint/rollback");
|
|
|
|
|
} else {
|
2016-08-01 17:19:14 +02:00
|
|
|
if (nm_streq0 (op, NM_AUDIT_OP_CHECKPOINT_CREATE)) {
|
2016-08-01 17:57:13 +02:00
|
|
|
timeout = GPOINTER_TO_UINT (nm_auth_chain_get_data (chain, "timeout"));
|
|
|
|
|
flags = GPOINTER_TO_UINT (nm_auth_chain_get_data (chain, "flags"));
|
|
|
|
|
devices = nm_auth_chain_get_data (chain, "devices");
|
|
|
|
|
|
|
|
|
|
checkpoint = nm_checkpoint_manager_create (_checkpoint_mgr_get (self, TRUE),
|
|
|
|
|
(const char *const *) devices,
|
|
|
|
|
timeout,
|
|
|
|
|
(NMCheckpointCreateFlags) flags,
|
|
|
|
|
&error);
|
|
|
|
|
if (checkpoint) {
|
2018-02-26 13:51:52 +01:00
|
|
|
arg = nm_dbus_object_get_path (NM_DBUS_OBJECT (checkpoint));
|
2016-08-01 17:19:14 +02:00
|
|
|
variant = g_variant_new ("(o)", arg);
|
2016-08-01 17:57:13 +02:00
|
|
|
}
|
2016-08-01 17:19:14 +02:00
|
|
|
} else if (nm_streq0 (op, NM_AUDIT_OP_CHECKPOINT_DESTROY)) {
|
2016-08-01 17:57:13 +02:00
|
|
|
nm_checkpoint_manager_destroy (_checkpoint_mgr_get (self, TRUE),
|
|
|
|
|
checkpoint_path, &error);
|
2016-08-01 17:19:14 +02:00
|
|
|
} else if (nm_streq0 (op, NM_AUDIT_OP_CHECKPOINT_ROLLBACK)) {
|
2016-08-01 17:57:13 +02:00
|
|
|
nm_checkpoint_manager_rollback (_checkpoint_mgr_get (self, TRUE),
|
|
|
|
|
checkpoint_path, &variant, &error);
|
2018-03-28 08:09:56 +02:00
|
|
|
} else if (nm_streq0 (op, NM_AUDIT_OP_CHECKPOINT_ADJUST_ROLLBACK_TIMEOUT)) {
|
|
|
|
|
add_timeout = GPOINTER_TO_UINT (nm_auth_chain_get_data (chain, "add_timeout"));
|
|
|
|
|
nm_checkpoint_manager_adjust_rollback_timeout (_checkpoint_mgr_get (self, TRUE),
|
|
|
|
|
checkpoint_path, add_timeout, &error);
|
2016-08-01 17:57:13 +02:00
|
|
|
} else
|
|
|
|
|
g_return_if_reached ();
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-01 17:19:14 +02:00
|
|
|
nm_audit_log_checkpoint_op (op, arg ?: "", !error, nm_auth_chain_get_subject (chain),
|
|
|
|
|
error ? error->message : NULL);
|
|
|
|
|
|
2016-08-01 17:57:13 +02:00
|
|
|
if (error)
|
|
|
|
|
g_dbus_method_invocation_take_error (context, error);
|
|
|
|
|
else
|
|
|
|
|
g_dbus_method_invocation_return_value (context, variant);
|
|
|
|
|
|
2018-04-09 11:52:55 +02:00
|
|
|
nm_auth_chain_destroy (chain);
|
2016-08-01 17:57:13 +02:00
|
|
|
}
|
|
|
|
|
|
2016-07-01 12:11:01 +02:00
|
|
|
static void
|
2018-02-26 13:51:52 +01:00
|
|
|
impl_manager_checkpoint_create (NMDBusObject *obj,
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
|
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER (obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
|
2016-08-01 17:57:13 +02:00
|
|
|
NMAuthChain *chain;
|
2018-02-26 13:51:52 +01:00
|
|
|
char **devices;
|
|
|
|
|
guint32 rollback_timeout;
|
|
|
|
|
guint32 flags;
|
2016-07-01 12:11:01 +02:00
|
|
|
|
|
|
|
|
G_STATIC_ASSERT_EXPR (sizeof (flags) <= sizeof (NMCheckpointCreateFlags));
|
|
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context (invocation, checkpoint_auth_done_cb, self);
|
2016-08-01 17:57:13 +02:00
|
|
|
if (!chain) {
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal (invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Unable to authenticate request.");
|
2016-07-01 12:11:01 +02:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get (parameters, "(^aouu)", &devices, &rollback_timeout, &flags);
|
|
|
|
|
|
2016-08-01 17:57:13 +02:00
|
|
|
priv->auth_chains = g_slist_append (priv->auth_chains, chain);
|
2016-08-01 17:19:14 +02:00
|
|
|
nm_auth_chain_set_data (chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_CREATE, NULL);
|
2018-02-26 13:51:52 +01:00
|
|
|
nm_auth_chain_set_data (chain, "devices", devices, (GDestroyNotify) g_strfreev);
|
2016-08-01 17:57:13 +02:00
|
|
|
nm_auth_chain_set_data (chain, "flags", GUINT_TO_POINTER (flags), NULL);
|
|
|
|
|
nm_auth_chain_set_data (chain, "timeout", GUINT_TO_POINTER (rollback_timeout), NULL);
|
|
|
|
|
nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
|
2016-07-01 12:11:01 +02:00
|
|
|
}
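Matching the "(^aouu)" parameters parsed by this handler and the "(o)" reply built in checkpoint_auth_done_cb(), a client-side sketch of CheckpointCreate could look as follows (plain GDBusConnection, not code from this file; the device path is only an example):

/* Sketch: create a checkpoint for one device with a 120 second rollback
 * timeout and no flags; on success returns the new checkpoint's object path. */
#include <gio/gio.h>

static char *
create_checkpoint (GDBusConnection *bus, GError **error)
{
	const char *devices[] = { "/org/freedesktop/NetworkManager/Devices/1", NULL };
	GVariant *ret;
	char *checkpoint_path = NULL;

	ret = g_dbus_connection_call_sync (bus,
	                                   "org.freedesktop.NetworkManager",
	                                   "/org/freedesktop/NetworkManager",
	                                   "org.freedesktop.NetworkManager",
	                                   "CheckpointCreate",
	                                   g_variant_new ("(^aouu)",
	                                                  devices,
	                                                  (guint) 120, /* rollback timeout, seconds */
	                                                  (guint) 0),  /* NMCheckpointCreateFlags */
	                                   G_VARIANT_TYPE ("(o)"),
	                                   G_DBUS_CALL_FLAGS_NONE,
	                                   -1, NULL, error);
	if (!ret)
		return NULL;
	g_variant_get (ret, "(o)", &checkpoint_path);
	g_variant_unref (ret);
	return checkpoint_path;
}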
|
|
|
|
|
|
|
|
|
|
static void
|
2018-02-26 13:51:52 +01:00
impl_manager_checkpoint_destroy (NMDBusObject *obj,
                                 const NMDBusInterfaceInfoExtended *interface_info,
                                 const NMDBusMethodInfoExtended *method_info,
                                 GDBusConnection *connection,
                                 const char *sender,
                                 GDBusMethodInvocation *invocation,
                                 GVariant *parameters)
{
	NMManager *self = NM_MANAGER (obj);
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMAuthChain *chain;
	const char *checkpoint_path;

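	/* Authorization is asynchronous: the auth chain captures the caller's
	 * context, the permission added below is verified out of band, and
	 * checkpoint_auth_done_cb() later completes the pending invocation
	 * with the result. */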
	chain = nm_auth_chain_new_context (invocation, checkpoint_auth_done_cb, self);
	if (!chain) {
		g_dbus_method_invocation_return_error_literal (invocation,
		                                               NM_MANAGER_ERROR,
		                                               NM_MANAGER_ERROR_PERMISSION_DENIED,
		                                               "Unable to authenticate request.");
		return;
	}

	g_variant_get (parameters, "(&o)", &checkpoint_path);

	priv->auth_chains = g_slist_append (priv->auth_chains, chain);
	nm_auth_chain_set_data (chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_DESTROY, NULL);
	nm_auth_chain_set_data (chain, "checkpoint_path", g_strdup (checkpoint_path), g_free);
	nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
}

static void
impl_manager_checkpoint_rollback (NMDBusObject *obj,
                                  const NMDBusInterfaceInfoExtended *interface_info,
                                  const NMDBusMethodInfoExtended *method_info,
                                  GDBusConnection *connection,
                                  const char *sender,
                                  GDBusMethodInvocation *invocation,
                                  GVariant *parameters)
{
	NMManager *self = NM_MANAGER (obj);
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMAuthChain *chain;
	const char *checkpoint_path;

	chain = nm_auth_chain_new_context (invocation, checkpoint_auth_done_cb, self);
	if (!chain) {
		g_dbus_method_invocation_return_error_literal (invocation,
		                                               NM_MANAGER_ERROR,
		                                               NM_MANAGER_ERROR_PERMISSION_DENIED,
		                                               "Unable to authenticate request.");
		return;
	}

	g_variant_get (parameters, "(&o)", &checkpoint_path);

	priv->auth_chains = g_slist_append (priv->auth_chains, chain);
	nm_auth_chain_set_data (chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_ROLLBACK, NULL);
	nm_auth_chain_set_data (chain, "checkpoint_path", g_strdup (checkpoint_path), g_free);
	nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
}

checkpoint: allow resetting the rollback timeout via D-Bus
This allows adjusting the timeout of an existing checkpoint.
The main use case of checkpoints is to have a fail-safe when
configuring the network remotely. By allowing the timeout to be reset,
the user can perform a series of actions and keep bumping the
timeout. That way, the entire series is still guarded by the same
checkpoint, but the user can start with a short timeout and
re-adjust it along the way.
The libnm API only implements the async form (at least for now).
Sync methods are fundamentally wrong with D-Bus, and a sync variant
is probably not needed. Also, follow the GLib convention, where the
async form doesn't carry an _async name suffix. Also, accept a D-Bus
path as the argument, not an NMCheckpoint instance. The libnm API
should not be more restricted than the underlying D-Bus API, and it
would be cumbersome to require the user to look up the NMCheckpoint
instance first, especially since libnm doesn't provide an efficient
or convenient lookup-by-path method. On the other hand, retrieving
the path from an NMCheckpoint instance is always possible.
static void
impl_manager_checkpoint_adjust_rollback_timeout (NMDBusObject *obj,
                                                 const NMDBusInterfaceInfoExtended *interface_info,
                                                 const NMDBusMethodInfoExtended *method_info,
                                                 GDBusConnection *connection,
                                                 const char *sender,
                                                 GDBusMethodInvocation *invocation,
                                                 GVariant *parameters)
{
	NMManager *self = NM_MANAGER (obj);
	NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
	NMAuthChain *chain;
	const char *checkpoint_path;
	guint32 add_timeout;

	chain = nm_auth_chain_new_context (invocation, checkpoint_auth_done_cb, self);
	if (!chain) {
		g_dbus_method_invocation_return_error_literal (invocation,
		                                               NM_MANAGER_ERROR,
		                                               NM_MANAGER_ERROR_PERMISSION_DENIED,
		                                               "Unable to authenticate request.");
		return;
	}

	g_variant_get (parameters, "(&ou)", &checkpoint_path, &add_timeout);

	priv->auth_chains = g_slist_append (priv->auth_chains, chain);
	nm_auth_chain_set_data (chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_ADJUST_ROLLBACK_TIMEOUT, NULL);
	nm_auth_chain_set_data (chain, "checkpoint_path", g_strdup (checkpoint_path), g_free);
	nm_auth_chain_set_data (chain, "add_timeout", GUINT_TO_POINTER (add_timeout), NULL);
	nm_auth_chain_add_call (chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
}
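For illustration only, here is a hedged client-side sketch (not part of nm-manager.c) of invoking the handler above over the system bus with plain GDBusConnection. The commit message above notes that libnm exposes only an async call; this raw synchronous call merely shows the wire format. The bus name, object path, interface, and method name are assumptions based on NetworkManager's public D-Bus API, and the "(ou)" tuple mirrors the g_variant_get() in the handler.

#include <gio/gio.h>

static gboolean
adjust_rollback_timeout_example (GDBusConnection *bus,
                                 const char *checkpoint_path,
                                 guint32 add_timeout,
                                 GError **error)
{
	GVariant *ret;

	/* Assumed names: org.freedesktop.NetworkManager service and interface,
	 * /org/freedesktop/NetworkManager object, CheckpointAdjustRollbackTimeout method. */
	ret = g_dbus_connection_call_sync (bus,
	                                   "org.freedesktop.NetworkManager",
	                                   "/org/freedesktop/NetworkManager",
	                                   "org.freedesktop.NetworkManager",
	                                   "CheckpointAdjustRollbackTimeout",
	                                   g_variant_new ("(ou)", checkpoint_path, add_timeout),
	                                   NULL,
	                                   G_DBUS_CALL_FLAGS_NONE,
	                                   -1,
	                                   NULL,
	                                   error);
	if (!ret)
		return FALSE;
	g_variant_unref (ret);
	return TRUE;
}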

/*****************************************************************************/

static void
auth_mgr_changed (NMAuthManager *auth_manager, gpointer user_data)
{
	/* Let clients know they should re-check their authorization */
	nm_dbus_object_emit_signal (user_data,
	                            &interface_info_manager,
	                            &signal_info_check_permissions,
	                            "()");
}

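/* These constants mirror RFKILL_OP_CHANGE_ALL and the RFKILL_TYPE_* values
 * from the kernel's <linux/rfkill.h>. rfkill_change() below fills a
 * struct rfkill_event with them and writes it to /dev/rfkill to flip the
 * soft-block state of every switch of the given type. */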
#define KERN_RFKILL_OP_CHANGE_ALL 3
#define KERN_RFKILL_TYPE_WLAN     1
#define KERN_RFKILL_TYPE_WWAN     5
struct rfkill_event {
	__u32 idx;
	__u8  type;
	__u8  op;
	__u8  soft, hard;
} _nm_packed;

static void
2016-04-27 18:26:39 +02:00
|
|
|
rfkill_change (NMManager *self, const char *desc, RfKillType rtype, gboolean enabled)
|
2014-01-24 13:24:01 -06:00
|
|
|
{
|
|
|
|
|
int fd;
|
|
|
|
|
struct rfkill_event event;
|
|
|
|
|
ssize_t len;
|
|
|
|
|
|
2013-05-31 14:11:56 -05:00
|
|
|
g_return_if_fail (rtype == RFKILL_TYPE_WLAN || rtype == RFKILL_TYPE_WWAN);
|
|
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
errno = 0;
|
2016-12-10 15:28:15 +01:00
|
|
|
fd = open ("/dev/rfkill", O_RDWR | O_CLOEXEC);
|
2014-01-24 13:24:01 -06:00
|
|
|
if (fd < 0) {
|
|
|
|
|
if (errno == EACCES)
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOGW (LOGD_RFKILL, "rfkill: (%s): failed to open killswitch device", desc);
|
2014-01-24 13:24:01 -06:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (fcntl (fd, F_SETFL, O_NONBLOCK) < 0) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOGW (LOGD_RFKILL, "rfkill: (%s): failed to set killswitch device for "
|
2016-04-27 18:26:39 +02:00
|
|
|
"non-blocking operation", desc);
|
2017-11-14 14:22:21 +01:00
|
|
|
nm_close (fd);
|
2014-01-24 13:24:01 -06:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
memset (&event, 0, sizeof (event));
|
|
|
|
|
event.op = KERN_RFKILL_OP_CHANGE_ALL;
|
2013-05-31 14:11:56 -05:00
|
|
|
switch (rtype) {
|
|
|
|
|
case RFKILL_TYPE_WLAN:
|
|
|
|
|
event.type = KERN_RFKILL_TYPE_WLAN;
|
|
|
|
|
break;
|
|
|
|
|
case RFKILL_TYPE_WWAN:
|
|
|
|
|
event.type = KERN_RFKILL_TYPE_WWAN;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
g_assert_not_reached ();
|
|
|
|
|
}
|
2014-01-24 13:24:01 -06:00
|
|
|
event.soft = enabled ? 0 : 1;
|
|
|
|
|
|
|
|
|
|
len = write (fd, &event, sizeof (event));
|
|
|
|
|
if (len < 0) {
|
2018-11-21 11:32:38 +01:00
|
|
|
_LOGW (LOGD_RFKILL, "rfkill: (%s): failed to change Wi-Fi killswitch state: (%d) %s",
|
2016-04-27 18:26:39 +02:00
|
|
|
desc, errno, g_strerror (errno));
|
2014-01-24 13:24:01 -06:00
|
|
|
} else if (len == sizeof (event)) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOGI (LOGD_RFKILL, "rfkill: %s hardware radio set %s",
|
2016-04-27 18:26:39 +02:00
|
|
|
desc, enabled ? "enabled" : "disabled");
|
2014-01-24 13:24:01 -06:00
|
|
|
} else {
|
|
|
|
|
/* Failed to write full structure */
|
2018-11-21 11:32:38 +01:00
|
|
|
_LOGW (LOGD_RFKILL, "rfkill: (%s): failed to change Wi-Fi killswitch state", desc);
|
2014-01-24 13:24:01 -06:00
|
|
|
}
|
|
|
|
|
|
2017-11-14 14:22:21 +01:00
|
|
|
nm_close (fd);
|
2014-01-24 13:24:01 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
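/* Illustrative sketch (not part of the original file): rfkill_change() above only
 * writes CHANGE_ALL events. For completeness, this is roughly how the opposite
 * direction looks: a monitor (in NetworkManager that role is played by
 * NMRfkillManager) reads the same struct rfkill_event records from /dev/rfkill to
 * learn about soft/hard block changes. The function name and error handling here
 * are hypothetical. */
static gboolean
example_read_rfkill_event (int fd, struct rfkill_event *event)
{
    ssize_t len;

    len = read (fd, event, sizeof (*event));
    if (len != (ssize_t) sizeof (*event))
        return FALSE;

    /* event->soft/event->hard are non-zero while the corresponding killswitch
     * blocks the radio; event->type tells which kind of radio is affected. */
    return TRUE;
}
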
static void
manager_radio_user_toggled (NMManager *self,
                            RadioState *rstate,
                            gboolean enabled)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    gboolean old_enabled, new_enabled;

    /* Don't touch devices if asleep/networking disabled */
    if (manager_sleeping (self))
        return;

    if (rstate->desc) {
        _LOGD (LOGD_RFKILL, "rfkill: (%s): setting radio %s by user",
               rstate->desc,
               enabled ? "enabled" : "disabled");
    }

    /* Update enabled key in state file */
    nm_config_state_set (priv->config, TRUE, FALSE,
                         rstate->key, enabled);

    /* When the user toggles the radio, their request should override any
     * daemon (like ModemManager) enabled state that can be changed. For WWAN,
     * for example, we want the WwanEnabled property to also reflect the daemon
     * state so that users can toggle the modem's power state, but we don't want
     * that daemon state to affect whether or not the user *can* turn it on, which
     * is what the kernel rfkill state does. So we ignore the daemon enabled state
     * when determining what the new state should be, since it shouldn't block
     * the user's request.
     */
    old_enabled = radio_enabled_for_rstate (rstate, TRUE);
    rstate->user_enabled = enabled;
    new_enabled = radio_enabled_for_rstate (rstate, FALSE);
    if (new_enabled != old_enabled) {
        /* Try to change the kernel rfkill state */
        if (rstate->rtype == RFKILL_TYPE_WLAN || rstate->rtype == RFKILL_TYPE_WWAN)
            rfkill_change (self, rstate->desc, rstate->rtype, new_enabled);

        manager_update_radio_enabled (self, rstate, new_enabled);
    }
}

static gboolean
periodic_update_active_connection_timestamps (gpointer user_data)
{
    NMManager *manager = NM_MANAGER (user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (manager);
    NMActiveConnection *ac;

    c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
        if (nm_active_connection_get_state (ac) == NM_ACTIVE_CONNECTION_STATE_ACTIVATED) {
            nm_settings_connection_update_timestamp (nm_active_connection_get_settings_connection (ac),
                                                     (guint64) time (NULL), FALSE);
        }
    }
    return G_SOURCE_CONTINUE;
}

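/* Illustrative sketch (not part of the original file): the callback above is meant
 * to be driven by a periodic GLib timeout source; returning G_SOURCE_CONTINUE keeps
 * the source installed until the owner removes it by id. nm_manager_init() below
 * registers it with a 300 second interval; this shows the bare GLib pattern, with a
 * hypothetical wrapper name. */
static guint
example_start_timestamp_timer (NMManager *self)
{
    /* fires every 300 seconds until g_source_remove() on the returned id
     * (or G_SOURCE_REMOVE from the callback) tears it down */
    return g_timeout_add_seconds (300,
                                  periodic_update_active_connection_timestamps,
                                  self);
}
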
/*****************************************************************************/

void
nm_manager_set_capability (NMManager *self,
                           NMCapability cap)
{
    NMManagerPrivate *priv;
    guint32 cap_i;
    gssize idx;

    g_return_if_fail (NM_IS_MANAGER (self));
    if (cap < 1 || cap > NM_CAPABILITY_TEAM)
        g_return_if_reached ();

    cap_i = (guint32) cap;

    priv = NM_MANAGER_GET_PRIVATE (self);

    idx = nm_utils_array_find_binary_search (&g_array_index (priv->capabilities, guint32, 0),
                                             sizeof (guint32),
                                             priv->capabilities->len,
                                             &cap_i,
                                             nm_cmp_uint32_p_with_data,
                                             NULL);
    if (idx >= 0)
        return;

    nm_assert ((~idx) <= (gssize) priv->capabilities->len);

    g_array_insert_val (priv->capabilities, ~idx, cap_i);
    _notify (self, PROP_CAPABILITIES);
}

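/* Illustrative note (not part of the original file): the binary search above follows
 * the common convention of returning the match index when the value is found and the
 * bitwise complement of the insertion position when it is not. A tiny worked example
 * with plain integers (hypothetical helper, not NM API):
 *
 *   sorted array: { 1, 3, 7 }, searched value: 5
 *   not found -> return ~2 (== -3), because 5 would be inserted at index 2
 *   caller:   if (idx < 0) insert_at (~idx);    // ~(-3) == 2
 *
 * That is why nm_manager_set_capability() can pass ~idx straight to
 * g_array_insert_val() and keep priv->capabilities sorted. */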
/*****************************************************************************/

NM_DEFINE_SINGLETON_REGISTER (NMManager);

NMManager *
nm_manager_get (void)
{
    g_return_val_if_fail (singleton_instance, NULL);
    return singleton_instance;
}

NMSettings *
nm_settings_get (void)
{
    g_return_val_if_fail (singleton_instance, NULL);

    return NM_MANAGER_GET_PRIVATE (singleton_instance)->settings;
}

NMManager *
nm_manager_setup (void)
{
    NMManager *self;

    g_return_val_if_fail (!singleton_instance, singleton_instance);

    self = g_object_new (NM_TYPE_MANAGER, NULL);
    nm_assert (NM_IS_MANAGER (self));
    singleton_instance = self;

    nm_singleton_instance_register ();
    _LOGD (LOGD_CORE, "setup %s singleton (%p)", "NMManager", singleton_instance);

    nm_dbus_object_export (NM_DBUS_OBJECT (self));
    return self;
}

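/* Illustrative sketch (not part of the original file): the intended life cycle of
 * the singleton accessors above, written out as a caller would use them. This is
 * assumed usage for clarity, not a verbatim quote of NetworkManager's startup code. */
static void
example_singleton_usage (void)
{
    NMManager *manager;

    manager = nm_manager_setup ();          /* create and export, exactly once at startup */
    g_assert (manager == nm_manager_get ());

    /* from here on, any code may grab the singletons without passing them around */
    (void) nm_settings_get ();
}
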
static void
constructed (GObject *object)
{
    NMManager *self = NM_MANAGER (object);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    const NMConfigState *state;

    G_OBJECT_CLASS (nm_manager_parent_class)->constructed (object);

    priv->settings = nm_settings_new ();
    nm_dbus_object_export (NM_DBUS_OBJECT (priv->settings));

    g_signal_connect (priv->settings, "notify::" NM_SETTINGS_STARTUP_COMPLETE,
                      G_CALLBACK (settings_startup_complete_changed), self);
    g_signal_connect (priv->settings, "notify::" NM_SETTINGS_UNMANAGED_SPECS,
                      G_CALLBACK (system_unmanaged_devices_changed_cb), self);
    g_signal_connect (priv->settings, NM_SETTINGS_SIGNAL_CONNECTION_ADDED,
                      G_CALLBACK (connection_added_cb), self);
    g_signal_connect (priv->settings, NM_SETTINGS_SIGNAL_CONNECTION_UPDATED,
                      G_CALLBACK (connection_updated_cb), self);
    g_signal_connect (priv->settings, NM_SETTINGS_SIGNAL_CONNECTION_FLAGS_CHANGED,
                      G_CALLBACK (connection_flags_changed), self);

    priv->hostname_manager = g_object_ref (nm_hostname_manager_get ());
    g_signal_connect (priv->hostname_manager, "notify::" NM_HOSTNAME_MANAGER_HOSTNAME,
                      G_CALLBACK (hostname_changed_cb), self);

    /*
     * Do not delete existing virtual devices to keep connectivity up.
     * Virtual devices are reused when NetworkManager is restarted.
     * Hence, don't react on NM_SETTINGS_SIGNAL_CONNECTION_REMOVED.
     */

    priv->policy = nm_policy_new (self, priv->settings);
    g_signal_connect (priv->policy, "notify::" NM_POLICY_DEFAULT_IP4_AC,
                      G_CALLBACK (policy_default_ac_changed), self);
    g_signal_connect (priv->policy, "notify::" NM_POLICY_DEFAULT_IP6_AC,
                      G_CALLBACK (policy_default_ac_changed), self);
    g_signal_connect (priv->policy, "notify::" NM_POLICY_ACTIVATING_IP4_AC,
                      G_CALLBACK (policy_activating_ac_changed), self);
    g_signal_connect (priv->policy, "notify::" NM_POLICY_ACTIVATING_IP6_AC,
                      G_CALLBACK (policy_activating_ac_changed), self);

    priv->config = g_object_ref (nm_config_get ());
    g_signal_connect (G_OBJECT (priv->config),
                      NM_CONFIG_SIGNAL_CONFIG_CHANGED,
                      G_CALLBACK (_config_changed_cb),
                      self);

    state = nm_config_state_get (priv->config);

    priv->net_enabled = state->net_enabled;

    priv->radio_states[RFKILL_TYPE_WLAN].user_enabled = state->wifi_enabled;
    priv->radio_states[RFKILL_TYPE_WWAN].user_enabled = state->wwan_enabled;

    priv->rfkill_mgr = nm_rfkill_manager_new ();
    g_signal_connect (priv->rfkill_mgr,
                      NM_RFKILL_MANAGER_SIGNAL_RFKILL_CHANGED,
                      G_CALLBACK (rfkill_manager_rfkill_changed_cb),
                      self);

    /* Force kernel Wi-Fi/WWAN rfkill state to follow NM saved Wi-Fi/WWAN state
     * in case the BIOS doesn't save rfkill state, and to be consistent with user
     * changes to the WirelessEnabled/WWANEnabled properties which toggle kernel
     * rfkill.
     */
    rfkill_change (self, priv->radio_states[RFKILL_TYPE_WLAN].desc, RFKILL_TYPE_WLAN, priv->radio_states[RFKILL_TYPE_WLAN].user_enabled);
    rfkill_change (self, priv->radio_states[RFKILL_TYPE_WWAN].desc, RFKILL_TYPE_WWAN, priv->radio_states[RFKILL_TYPE_WWAN].user_enabled);
}

static void
nm_manager_init (NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    guint i;
    GFile *file;

    c_list_init (&priv->link_cb_lst);
    c_list_init (&priv->devices_lst_head);
    c_list_init (&priv->active_connections_lst_head);
    c_list_init (&priv->async_op_lst_head);
    c_list_init (&priv->delete_volatile_connection_lst_head);

    priv->platform = g_object_ref (NM_PLATFORM_GET);

    priv->capabilities = g_array_new (FALSE, FALSE, sizeof (guint32));

    /* Initialize rfkill structures and states */
    memset (priv->radio_states, 0, sizeof (priv->radio_states));

    priv->radio_states[RFKILL_TYPE_WLAN].user_enabled = TRUE;
    priv->radio_states[RFKILL_TYPE_WLAN].key = NM_CONFIG_STATE_PROPERTY_WIFI_ENABLED;
    priv->radio_states[RFKILL_TYPE_WLAN].prop = NM_MANAGER_WIRELESS_ENABLED;
    priv->radio_states[RFKILL_TYPE_WLAN].hw_prop = NM_MANAGER_WIRELESS_HARDWARE_ENABLED;
    priv->radio_states[RFKILL_TYPE_WLAN].desc = "Wi-Fi";
    priv->radio_states[RFKILL_TYPE_WLAN].rtype = RFKILL_TYPE_WLAN;

    priv->radio_states[RFKILL_TYPE_WWAN].user_enabled = TRUE;
    priv->radio_states[RFKILL_TYPE_WWAN].key = NM_CONFIG_STATE_PROPERTY_WWAN_ENABLED;
    priv->radio_states[RFKILL_TYPE_WWAN].prop = NM_MANAGER_WWAN_ENABLED;
    priv->radio_states[RFKILL_TYPE_WWAN].hw_prop = NM_MANAGER_WWAN_HARDWARE_ENABLED;
    priv->radio_states[RFKILL_TYPE_WWAN].desc = "WWAN";
    priv->radio_states[RFKILL_TYPE_WWAN].rtype = RFKILL_TYPE_WWAN;

    for (i = 0; i < RFKILL_TYPE_MAX; i++)
        priv->radio_states[i].hw_enabled = TRUE;

    priv->sleeping = FALSE;
    priv->state = NM_STATE_DISCONNECTED;
    priv->startup = TRUE;

    /* sleep/wake handling */
    priv->sleep_monitor = nm_sleep_monitor_new ();
    g_signal_connect (priv->sleep_monitor, NM_SLEEP_MONITOR_SLEEPING,
                      G_CALLBACK (sleeping_cb), self);

    /* Listen for authorization changes */
    priv->auth_mgr = g_object_ref (nm_auth_manager_get ());
    g_signal_connect (priv->auth_mgr,
                      NM_AUTH_MANAGER_SIGNAL_CHANGED,
                      G_CALLBACK (auth_mgr_changed),
                      self);

    /* Monitor the firmware directory */
    if (strlen (KERNEL_FIRMWARE_DIR)) {
        file = g_file_new_for_path (KERNEL_FIRMWARE_DIR "/");
        priv->fw_monitor = g_file_monitor_directory (file, G_FILE_MONITOR_NONE, NULL, NULL);
        g_object_unref (file);
    }

    if (priv->fw_monitor) {
        g_signal_connect (priv->fw_monitor, "changed",
                          G_CALLBACK (firmware_dir_changed),
                          self);
        _LOGI (LOGD_CORE, "monitoring kernel firmware directory '%s'.",
               KERNEL_FIRMWARE_DIR);
    } else {
        _LOGW (LOGD_CORE, "failed to monitor kernel firmware directory '%s'.",
               KERNEL_FIRMWARE_DIR);
    }

    /* Update timestamps in active connections */
    priv->timestamp_update_id = g_timeout_add_seconds (300, (GSourceFunc) periodic_update_active_connection_timestamps, self);

    priv->metered = NM_METERED_UNKNOWN;
    priv->sleep_devices = g_hash_table_new (nm_direct_hash, NULL);
}

static void
get_property (GObject *object, guint prop_id,
              GValue *value, GParamSpec *pspec)
{
    NMManager *self = NM_MANAGER (object);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMConfigData *config_data;
    const NMGlobalDnsConfig *dns_config;
    const char *type;
    const char *path;
    NMActiveConnection *ac;
    GPtrArray *ptrarr;

    switch (prop_id) {
    case PROP_VERSION:
        g_value_set_string (value, VERSION);
        break;
    case PROP_CAPABILITIES:
        g_value_set_variant (value, g_variant_new_fixed_array (G_VARIANT_TYPE ("u"),
                                                               priv->capabilities->data,
                                                               priv->capabilities->len,
                                                               sizeof (guint32)));
        break;
    case PROP_STATE:
        g_value_set_uint (value, priv->state);
        break;
    case PROP_STARTUP:
        g_value_set_boolean (value, priv->startup);
        break;
    case PROP_NETWORKING_ENABLED:
        g_value_set_boolean (value, priv->net_enabled);
        break;
    case PROP_WIRELESS_ENABLED:
        g_value_set_boolean (value, radio_enabled_for_type (self, RFKILL_TYPE_WLAN, TRUE));
        break;
    case PROP_WIRELESS_HARDWARE_ENABLED:
        g_value_set_boolean (value, priv->radio_states[RFKILL_TYPE_WLAN].hw_enabled);
        break;
    case PROP_WWAN_ENABLED:
        g_value_set_boolean (value, radio_enabled_for_type (self, RFKILL_TYPE_WWAN, TRUE));
        break;
    case PROP_WWAN_HARDWARE_ENABLED:
        g_value_set_boolean (value, priv->radio_states[RFKILL_TYPE_WWAN].hw_enabled);
        break;
    case PROP_WIMAX_ENABLED:
        g_value_set_boolean (value, FALSE);
        break;
    case PROP_WIMAX_HARDWARE_ENABLED:
        g_value_set_boolean (value, FALSE);
        break;
    case PROP_ACTIVE_CONNECTIONS:
        ptrarr = g_ptr_array_new ();
        c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
            path = nm_dbus_object_get_path (NM_DBUS_OBJECT (ac));
            if (path)
                g_ptr_array_add (ptrarr, g_strdup (path));
        }
        g_ptr_array_add (ptrarr, NULL);
        g_value_take_boxed (value, g_ptr_array_free (ptrarr, FALSE));
        break;
    case PROP_CONNECTIVITY:
        g_value_set_uint (value, priv->connectivity_state);
        break;
    case PROP_CONNECTIVITY_CHECK_AVAILABLE:
        config_data = nm_config_get_data (priv->config);
        g_value_set_boolean (value, nm_config_data_get_connectivity_uri (config_data) != NULL);
        break;
    case PROP_CONNECTIVITY_CHECK_ENABLED:
        g_value_set_boolean (value, concheck_enabled (self, NULL));
        break;
    case PROP_PRIMARY_CONNECTION:
        nm_dbus_utils_g_value_set_object_path (value, priv->primary_connection);
        break;
    case PROP_PRIMARY_CONNECTION_TYPE:
        type = NULL;
        if (priv->primary_connection) {
            NMConnection *con;

            con = nm_active_connection_get_applied_connection (priv->primary_connection);
            if (con)
                type = nm_connection_get_connection_type (con);
        }
        g_value_set_string (value, type ?: "");
        break;
    case PROP_ACTIVATING_CONNECTION:
        nm_dbus_utils_g_value_set_object_path (value, priv->activating_connection);
        break;
    case PROP_SLEEPING:
        g_value_set_boolean (value, priv->sleeping);
        break;
    case PROP_DEVICES:
        g_value_take_boxed (value,
                            nm_utils_strv_make_deep_copied (_get_devices_paths (self,
                                                                                 FALSE)));
        break;
    case PROP_METERED:
        g_value_set_uint (value, priv->metered);
        break;
    case PROP_GLOBAL_DNS_CONFIGURATION:
        config_data = nm_config_get_data (priv->config);
        dns_config = nm_config_data_get_global_dns_config (config_data);
        nm_global_dns_config_to_dbus (dns_config, value);
        break;
    case PROP_ALL_DEVICES:
        g_value_take_boxed (value,
                            nm_utils_strv_make_deep_copied (_get_devices_paths (self,
                                                                                 TRUE)));
        break;
    case PROP_CHECKPOINTS:
        g_value_take_boxed (value,
                            priv->checkpoint_mgr
                              ? nm_utils_strv_make_deep_copied (nm_checkpoint_manager_get_checkpoint_paths (priv->checkpoint_mgr,
                                                                                                            NULL))
                              : NULL);
        break;
    default:
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
        break;
    }
}

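/* Illustrative sketch (not part of the original file): get_property() above is what
 * ultimately backs D-Bus property reads on the manager object. A client can fetch,
 * for example, the "Version" property with plain GDBus; the well-known name, object
 * path and interface are the standard NetworkManager ones, while the helper itself
 * is an assumed, minimal example. */
static char *
example_read_manager_version (GError **error)
{
    GDBusConnection *bus;
    GVariant *ret;
    GVariant *inner;
    char *version;

    bus = g_bus_get_sync (G_BUS_TYPE_SYSTEM, NULL, error);
    if (!bus)
        return NULL;

    ret = g_dbus_connection_call_sync (bus,
                                       "org.freedesktop.NetworkManager",
                                       "/org/freedesktop/NetworkManager",
                                       "org.freedesktop.DBus.Properties",
                                       "Get",
                                       g_variant_new ("(ss)",
                                                      "org.freedesktop.NetworkManager",
                                                      "Version"),
                                       G_VARIANT_TYPE ("(v)"),
                                       G_DBUS_CALL_FLAGS_NONE,
                                       -1,
                                       NULL,
                                       error);
    g_object_unref (bus);
    if (!ret)
        return NULL;

    /* unwrap the variant returned by org.freedesktop.DBus.Properties.Get */
    g_variant_get (ret, "(v)", &inner);
    version = g_variant_dup_string (inner, NULL);
    g_variant_unref (inner);
    g_variant_unref (ret);
    return version;
}
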
static void
set_property (GObject *object, guint prop_id,
              const GValue *value, GParamSpec *pspec)
{
    NMManager *self = NM_MANAGER (object);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    NMGlobalDnsConfig *dns_config;
    GError *error = NULL;

    switch (prop_id) {
    case PROP_WIRELESS_ENABLED:
        manager_radio_user_toggled (NM_MANAGER (object),
                                    &priv->radio_states[RFKILL_TYPE_WLAN],
                                    g_value_get_boolean (value));
        break;
    case PROP_WWAN_ENABLED:
        manager_radio_user_toggled (NM_MANAGER (object),
                                    &priv->radio_states[RFKILL_TYPE_WWAN],
                                    g_value_get_boolean (value));
        break;
    case PROP_WIMAX_ENABLED:
        /* WIMAX is deprecated. This does nothing. */
        break;
    case PROP_CONNECTIVITY_CHECK_ENABLED:
        nm_config_set_connectivity_check_enabled (priv->config,
                                                  g_value_get_boolean (value));
        break;
    case PROP_GLOBAL_DNS_CONFIGURATION:
        dns_config = nm_global_dns_config_from_dbus (value, &error);
        if (!error)
            nm_config_set_global_dns (priv->config, dns_config, &error);

        nm_global_dns_config_free (dns_config);

        if (error) {
            _LOGD (LOGD_CORE, "set global DNS failed with error: %s", error->message);
            g_error_free (error);
        }
        break;
    default:
        G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
        break;
    }
}

static void
_deinit_device_factory (NMDeviceFactory *factory, gpointer user_data)
{
    g_signal_handlers_disconnect_matched (factory, G_SIGNAL_MATCH_DATA, 0, 0, NULL, NULL, NM_MANAGER (user_data));
}

static void
dispose (GObject *object)
{
    NMManager *self = NM_MANAGER (object);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE (self);
    CList *iter, *iter_safe;
    NMActiveConnection *ac, *ac_safe;

    nm_assert (c_list_is_empty (&priv->async_op_lst_head));

    g_signal_handlers_disconnect_by_func (priv->platform,
                                          G_CALLBACK (platform_link_cb),
                                          self);
    c_list_for_each_safe (iter, iter_safe, &priv->link_cb_lst) {
        PlatformLinkCbData *data = c_list_entry (iter, PlatformLinkCbData, lst);

        g_source_remove (data->idle_id);
        c_list_unlink_stale (iter);
        g_slice_free (PlatformLinkCbData, data);
    }

    g_slist_free_full (priv->auth_chains, (GDestroyNotify) nm_auth_chain_destroy);
    priv->auth_chains = NULL;

    nm_clear_g_source (&priv->devices_inited_id);

    g_clear_pointer (&priv->checkpoint_mgr, nm_checkpoint_manager_free);

connectivity: schedule connectivity timers per-device and probe for short outages
It might happen that connectivity is lost only for a moment and
returns soon after. Based on that assumption, when we lose connectivity
we want to have a probe interval where we check for returning
connectivity more frequently.
For that, we handle tracking of the timeouts per-device.
The interval shall start at 1 second, and double until
the full interval is reached. Actually, due to the implementation, it's unlikely
that we already perform the second check 1 second later. That is because commonly
the first check returns before the one second timeout is reached and bumps the
interval to 2 seconds right away.
Also, we go through extra lengths so that manual connectivity checks
delay the periodic checks. By being smarter about that, we can reduce
the number of connectivity checks while still keeping the promise to
check at least within the requested interval.
The complexity of bookkeeping the timeouts is remarkable. But I think
it is worth the effort and we should try hard to
- have a connectivity state as accurate as possible. Clearly,
connectivity checking means that we are probing, so being more intelligent
about timeout and backoff timers can result in a better connectivity
state. The connectivity state is important because we use it for
the default-route penalty and the GUI indicates bad connectivity.
- be intelligent about avoiding redundant connectivity checks. While
we want to check often to get an accurate connectivity state, we
also want to minimize the number of HTTP requests, in case the
connectivity is established and supposedly stable.
Also, perform connectivity checks in every state of the device.
Even if a device is disconnected, it still might have connectivity,
for example if the user externally adds an IP address on an unmanaged
device.
https://bugzilla.gnome.org/show_bug.cgi?id=792240
2018-02-20 21:41:14 +01:00
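A minimal sketch of the doubling probe interval described above, assuming a hypothetical helper name and GLib integer types; the actual per-device bookkeeping in NMDevice is more involved.

/* Hypothetical sketch of the backoff described above; not the actual
 * NMDevice implementation. */
static guint
concheck_probe_next_interval (guint cur_interval_sec, guint configured_interval_sec)
{
	/* after losing connectivity, start probing after 1 second ... */
	if (cur_interval_sec == 0)
		return 1;
	/* ... and double the interval until the configured interval is reached */
	if (cur_interval_sec > configured_interval_sec / 2)
		return configured_interval_sec;
	return cur_interval_sec * 2;
}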
|
|
|
if (priv->concheck_mgr) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func (priv->concheck_mgr,
|
|
|
|
|
G_CALLBACK (concheck_config_changed_cb),
|
|
|
|
|
self);
|
|
|
|
|
g_clear_object (&priv->concheck_mgr);
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-01 14:25:34 +02:00
|
|
|
if (priv->auth_mgr) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func (priv->auth_mgr,
|
|
|
|
|
G_CALLBACK (auth_mgr_changed),
|
2017-09-29 15:18:48 +02:00
|
|
|
self);
|
2016-07-01 14:25:34 +02:00
|
|
|
g_clear_object (&priv->auth_mgr);
|
|
|
|
|
}
|
2009-12-23 00:18:18 -08:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the
element without having access to the list's head
- contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and only consumed 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
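To illustrate the O(1) unlink with an embedded node, here is a small sketch; the struct and function names are made up for illustration and are not the actual NMManager/NMDevice types.

/* Illustrative only: an embedded CList node allows unlinking without the
 * list head and without a separately allocated link element. */
typedef struct {
	CList lst;      /* embedded link node */
	char *iface;
} ExampleEntry;

static void
example_untrack (ExampleEntry *entry)
{
	/* O(1): needs neither the list head nor a lookup of the link node */
	c_list_unlink (&entry->lst);
}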
|
|
|
nm_assert (c_list_is_empty (&priv->devices_lst_head));
|
2010-04-08 18:23:43 -07:00
|
|
|
|
2016-01-04 09:46:22 +01:00
|
|
|
nm_clear_g_source (&priv->ac_cleanup_id);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2017-11-23 21:30:09 +01:00
|
|
|
c_list_for_each_entry_safe (ac, ac_safe, &priv->active_connections_lst_head, active_connections_lst)
|
|
|
|
|
active_connection_remove (self, ac);
|
|
|
|
|
nm_assert (c_list_is_empty (&priv->active_connections_lst_head));
|
2014-01-24 13:24:01 -06:00
|
|
|
g_clear_object (&priv->primary_connection);
|
|
|
|
|
g_clear_object (&priv->activating_connection);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2015-01-30 19:52:53 +01:00
|
|
|
if (priv->config) {
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->config, _config_changed_cb, self);
|
2015-01-30 19:52:53 +01:00
|
|
|
g_clear_object (&priv->config);
|
|
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
if (priv->policy) {
|
2018-06-28 18:05:05 +02:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->policy, policy_default_ac_changed, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func (priv->policy, policy_activating_ac_changed, self);
|
2014-01-24 13:24:01 -06:00
|
|
|
g_clear_object (&priv->policy);
|
|
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2015-09-21 14:30:02 +02:00
|
|
|
if (priv->settings) {
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->settings, settings_startup_complete_changed, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func (priv->settings, system_unmanaged_devices_changed_cb, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func (priv->settings, connection_added_cb, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func (priv->settings, connection_updated_cb, self);
|
2017-12-05 13:55:25 +01:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->settings, connection_flags_changed, self);
|
2015-09-21 14:30:02 +02:00
|
|
|
g_clear_object (&priv->settings);
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-23 14:20:37 +02:00
|
|
|
if (priv->hostname_manager) {
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->hostname_manager, hostname_changed_cb, self);
|
2017-04-23 14:20:37 +02:00
|
|
|
g_clear_object (&priv->hostname_manager);
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
g_clear_object (&priv->vpn_manager);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2017-09-29 15:18:48 +02:00
|
|
|
sleep_devices_clear (self);
|
2016-05-05 14:14:40 +02:00
|
|
|
g_clear_pointer (&priv->sleep_devices, g_hash_table_unref);
|
|
|
|
|
|
2015-01-05 16:37:45 +01:00
|
|
|
if (priv->sleep_monitor) {
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->sleep_monitor, sleeping_cb, self);
|
2015-01-05 16:37:45 +01:00
|
|
|
g_clear_object (&priv->sleep_monitor);
|
|
|
|
|
}
|
2010-05-28 18:23:00 -07:00
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
if (priv->fw_monitor) {
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->fw_monitor, firmware_dir_changed, self);
|
2010-08-31 15:45:55 -05:00
|
|
|
|
2016-01-04 09:46:22 +01:00
|
|
|
nm_clear_g_source (&priv->fw_changed_id);
|
2010-07-01 10:32:11 -07:00
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
g_file_monitor_cancel (priv->fw_monitor);
|
|
|
|
|
g_clear_object (&priv->fw_monitor);
|
2010-07-01 10:32:11 -07:00
|
|
|
}
|
|
|
|
|
|
2015-09-21 14:38:25 +02:00
|
|
|
if (priv->rfkill_mgr) {
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func (priv->rfkill_mgr, rfkill_manager_rfkill_changed_cb, self);
|
2015-09-21 14:38:25 +02:00
|
|
|
g_clear_object (&priv->rfkill_mgr);
|
|
|
|
|
}
|
|
|
|
|
|
2018-03-10 16:18:16 +01:00
|
|
|
nm_clear_g_source (&priv->delete_volatile_connection_idle_id);
|
|
|
|
|
_delete_volatile_connection_all (self, FALSE);
|
|
|
|
|
nm_assert (!priv->delete_volatile_connection_idle_id);
|
|
|
|
|
nm_assert (c_list_is_empty (&priv->delete_volatile_connection_lst_head));
|
|
|
|
|
|
2017-09-29 15:18:48 +02:00
|
|
|
nm_device_factory_manager_for_each_factory (_deinit_device_factory, self);
|
2016-01-18 14:18:50 +01:00
|
|
|
|
2016-01-04 09:46:22 +01:00
|
|
|
nm_clear_g_source (&priv->timestamp_update_id);
|
2012-02-05 23:18:32 -06:00
|
|
|
|
device: generate unique default route-metrics per interface
In the past we had NMDefaultRouteManager which would coordinate adding
the default-route with identical metrics. That especially happened when
activating two devices of the same type, without explicitly specifying
ipv4.route-metric. For example, with ethernet devices, the routes on
both interfaces would get a metric of 100.
Coordinating routes was especially necessary because we added
routes with the NLM_F_EXCL flag, akin to `ip route replace`. We not
only had to avoid that activating two devices in NetworkManager would
result in a fight over the default-route, but more importantly
to preserve externally added default-routes on unmanaged interfaces.
NMDefaultRouteManager would ensure that in case of duplicate
metrics, the device that activated first would keep the
best default-route. It would do so by bumping the metric
of the second device to find an unused metric. The bumping itself
was not very important -- NMDefaultRouteManager could also just not
configure any default-routes that show up as second, the result
would be quite similar. More important was to keep the best
default-route on the first activating device until the device
deactivates or a device activates that really has a better
default-route.
Likewise, NMRouteManager would globally manage non-default-routes.
It would not do any bumping of metrics, but it would also ensure that the routes
of the device that activates first are not overwritten by a device activating
later.
However, the `ip route replace` approach has downsides, especially
that it messes with routes on other interfaces, interfaces that are
possibly not managed by NetworkManager. Another downside is that
binding a socket to an interface might not result in correct
routes, because the route might just not be there (in case of
NMRouteManager, which wouldn't configure duplicate routes by bumping
their metric).
Since commit 77ec302714795f905301d500b9aab6c88001f32e we no longer
use NLM_F_EXCL, but add routes akin to `ip route append`. When
activating for example two ethernet devices with no explicit route
metric configuration, there are two routes like
default via 10.16.122.254 dev eth0 proto dhcp metric 100
default via 192.168.100.1 dev eth1 proto dhcp metric 100
This does not only affect default routes. In case of a multi-homing
setup you'd get
192.168.100.0/24 dev eth0 proto kernel scope link src 192.168.100.1 metric 100
192.168.100.0/24 dev eth1 proto kernel scope link src 192.168.100.1 metric 100
but it's most visible for default-routes.
Note that we would append the routes that are activated later, as the order
of `ip route show` confirms. One might hence expect that the kernel selects
a route based on the order in the routing tables. However, that isn't
the case, and activating the second interface will non-deterministically
re-route traffic via the new interface. That will interfere badly
with NAT, stateful firewalls, and existing connections (like TCP).
The solution is to have NMManager keep a global index of the default route-metrics
currently in use. So, instead of determining the default-route metric based solely
on the device-type, we now in addition generate default metrics that do not
overlap. For example, if you activate eth0 first, it gets route-metric 100,
and if you then activate eth1, it gets 101. Note that if you deactivate
and re-activate eth0, then it will get route-metric 102, because the
best route should stick on eth1 (which reserves the range 100 to 101).
Note that when a connection explicitly selects a particular metric, that
choice is honored (contrary to NMDefaultRouteManager, which was more concerned
with avoiding conflicts than with keeping the exact metric).
https://bugzilla.redhat.com/show_bug.cgi?id=1505893
2017-12-05 16:32:04 +01:00
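As a rough sketch of the reservation idea (the helper name is hypothetical; the real bookkeeping in NMManager also handles releasing metrics and per-device effective metrics):

/* Hypothetical sketch: pick the lowest metric at or above the device
 * type's default that is not already reserved by another device. */
static guint32
example_reserve_route_metric (GHashTable *used_metrics /* set of guint keys */,
                              guint32 default_metric)
{
	guint32 metric = default_metric;

	while (g_hash_table_contains (used_metrics, GUINT_TO_POINTER (metric)))
		metric++;
	g_hash_table_add (used_metrics, GUINT_TO_POINTER (metric));
	return metric;
}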
|
|
|
g_clear_pointer (&priv->device_route_metrics, g_hash_table_destroy);
|
|
|
|
|
|
2016-09-23 13:24:10 +02:00
|
|
|
G_OBJECT_CLASS (nm_manager_parent_class)->dispose (object);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
finalize (GObject *object)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE ((NMManager *) object);
|
|
|
|
|
|
2016-09-15 23:34:24 +03:00
|
|
|
g_array_free (priv->capabilities, TRUE);
|
|
|
|
|
|
2016-09-23 13:24:10 +02:00
|
|
|
G_OBJECT_CLASS (nm_manager_parent_class)->finalize (object);
|
2017-09-29 15:11:33 +02:00
|
|
|
|
|
|
|
|
g_object_unref (priv->platform);
|
2009-06-11 00:39:12 -04:00
|
|
|
}
|
|
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested, and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. The generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
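A short illustration of the grouping mentioned above, assuming this file's generated _notify() helper and its existing property ids; with notifications frozen, the pending changes are dispatched together on thaw:

/* Sketch only: group two property changes into one dispatch. */
static void
example_group_property_changes (NMManager *self)
{
	g_object_freeze_notify (G_OBJECT (self));
	_notify (self, PROP_STATE);
	_notify (self, PROP_CONNECTIVITY);
	g_object_thaw_notify (G_OBJECT (self));
}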
|
|
|
static const GDBusSignalInfo signal_info_check_permissions = NM_DEFINE_GDBUS_SIGNAL_INFO_INIT (
|
|
|
|
|
"CheckPermissions",
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
static const GDBusSignalInfo signal_info_state_changed = NM_DEFINE_GDBUS_SIGNAL_INFO_INIT (
|
|
|
|
|
"StateChanged",
|
|
|
|
|
.args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("state", "u"),
|
|
|
|
|
),
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
static const GDBusSignalInfo signal_info_device_added = NM_DEFINE_GDBUS_SIGNAL_INFO_INIT (
|
|
|
|
|
"DeviceAdded",
|
|
|
|
|
.args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("device_path", "o"),
|
|
|
|
|
),
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
static const GDBusSignalInfo signal_info_device_removed = NM_DEFINE_GDBUS_SIGNAL_INFO_INIT (
|
|
|
|
|
"DeviceRemoved",
|
|
|
|
|
.args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("device_path", "o"),
|
|
|
|
|
),
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
static const NMDBusInterfaceInfoExtended interface_info_manager = {
|
|
|
|
|
.parent = NM_DEFINE_GDBUS_INTERFACE_INFO_INIT (
|
|
|
|
|
NM_DBUS_INTERFACE,
|
|
|
|
|
.methods = NM_DEFINE_GDBUS_METHOD_INFOS (
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"Reload",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("flags", "u"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_reload,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"GetDevices",
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("devices", "ao"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_get_devices,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"GetAllDevices",
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("devices", "ao"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_get_all_devices,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"GetDeviceByIpIface",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("iface", "s"),
|
|
|
|
|
),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("device", "o"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_get_device_by_ip_iface,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"ActivateConnection",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("connection", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("device", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("specific_object", "o"),
|
|
|
|
|
),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("active_connection", "o"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_activate_connection,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"AddAndActivateConnection",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("connection", "a{sa{sv}}"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("device", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("specific_object", "o"),
|
|
|
|
|
),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("path", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("active_connection", "o"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_add_and_activate_connection,
|
2018-10-30 16:40:40 +01:00
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"AddAndActivateConnection2",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("connection", "a{sa{sv}}"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("device", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("specific_object", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("options", "a{sv}"),
|
|
|
|
|
),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("path", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("active_connection", "o"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_add_and_activate_connection,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"DeactivateConnection",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("active_connection", "o"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_deactivate_connection,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"Sleep",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("sleep", "b"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_sleep,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"Enable",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("enable", "b"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_enable,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"GetPermissions",
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("permissions", "a{ss}"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_get_permissions,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"SetLogging",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("level", "s"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("domains", "s"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_set_logging,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"GetLogging",
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("level", "s"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("domains", "s"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_get_logging,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"CheckConnectivity",
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("connectivity", "u"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_check_connectivity,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"state",
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("state", "u"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_state,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"CheckpointCreate",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("devices", "ao"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("rollback_timeout", "u"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("flags", "u"),
|
|
|
|
|
),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("checkpoint", "o"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_checkpoint_create,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"CheckpointDestroy",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("checkpoint", "o"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_checkpoint_destroy,
|
|
|
|
|
),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"CheckpointRollback",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("checkpoint", "o"),
|
|
|
|
|
),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("result", "a{su}"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_checkpoint_rollback,
|
|
|
|
|
),
|
checkpoint: allow resetting the rollback timeout via D-Bus
This allows adjusting the timeout of an existing checkpoint.
The main use case of checkpoints is to have a fail-safe when
configuring the network remotely. By allowing the timeout to be reset,
the user can perform a series of actions and keep bumping the
timeout. That way, the entire series is still guarded by the same
checkpoint, but the user can start with a short timeout and
re-adjust the timeout as they go along.
The libnm API only implements the async form (at least for now).
Sync methods are fundamentally wrong with D-Bus, and a sync form is probably
not needed. Also, follow the GLib convention where the async form
doesn't have an _async name suffix. Also, accept a D-Bus path
as argument, not an NMCheckpoint instance. The libnm API should
not be more restricted than the underlying D-Bus API. It would
be cumbersome to require the user to look up the NMCheckpoint
instance first, especially since libnm doesn't provide an efficient
or convenient lookup-by-path method. On the other hand, retrieving
the path from an NMCheckpoint instance is always possible.
2018-03-28 08:09:56 +02:00
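A hedged client-side sketch of the new method; the bus name, object path, and argument signature follow the interface description below, while the helper name and values are examples only:

/* Example only: fire-and-forget call of CheckpointAdjustRollbackTimeout. */
static void
example_adjust_rollback_timeout (GDBusConnection *connection,
                                 const char      *checkpoint_path,
                                 guint32          add_timeout_sec)
{
	g_dbus_connection_call (connection,
	                        "org.freedesktop.NetworkManager",
	                        "/org/freedesktop/NetworkManager",
	                        "org.freedesktop.NetworkManager",
	                        "CheckpointAdjustRollbackTimeout",
	                        g_variant_new ("(ou)", checkpoint_path, add_timeout_sec),
	                        NULL,
	                        G_DBUS_CALL_FLAGS_NONE,
	                        -1,
	                        NULL, NULL, NULL);
}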
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED (
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT (
|
|
|
|
|
"CheckpointAdjustRollbackTimeout",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS (
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("checkpoint", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO ("add_timeout", "u"),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.handle = impl_manager_checkpoint_adjust_rollback_timeout,
|
|
|
|
|
),
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
),
|
|
|
|
|
.signals = NM_DEFINE_GDBUS_SIGNAL_INFOS (
|
|
|
|
|
&nm_signal_info_property_changed_legacy,
|
|
|
|
|
&signal_info_check_permissions,
|
|
|
|
|
&signal_info_state_changed,
|
|
|
|
|
&signal_info_device_added,
|
|
|
|
|
&signal_info_device_removed,
|
|
|
|
|
),
|
|
|
|
|
.properties = NM_DEFINE_GDBUS_PROPERTY_INFOS (
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("Devices", "ao", NM_MANAGER_DEVICES),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("AllDevices", "ao", NM_MANAGER_ALL_DEVICES),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("Checkpoints", "ao", NM_MANAGER_CHECKPOINTS),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("NetworkingEnabled", "b", NM_MANAGER_NETWORKING_ENABLED),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE_L ("WirelessEnabled", "b", NM_MANAGER_WIRELESS_ENABLED, NM_AUTH_PERMISSION_ENABLE_DISABLE_WIFI, NM_AUDIT_OP_RADIO_CONTROL),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("WirelessHardwareEnabled", "b", NM_MANAGER_WIRELESS_HARDWARE_ENABLED),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE_L ("WwanEnabled", "b", NM_MANAGER_WWAN_ENABLED, NM_AUTH_PERMISSION_ENABLE_DISABLE_WWAN, NM_AUDIT_OP_RADIO_CONTROL),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("WwanHardwareEnabled", "b", NM_MANAGER_WWAN_HARDWARE_ENABLED),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE_L ("WimaxEnabled", "b", NM_MANAGER_WIMAX_ENABLED, NM_AUTH_PERMISSION_ENABLE_DISABLE_WIMAX, NM_AUDIT_OP_RADIO_CONTROL),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("WimaxHardwareEnabled", "b", NM_MANAGER_WIMAX_HARDWARE_ENABLED),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("ActiveConnections", "ao", NM_MANAGER_ACTIVE_CONNECTIONS),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("PrimaryConnection", "o", NM_MANAGER_PRIMARY_CONNECTION),
|
2018-06-22 15:53:46 +02:00
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("PrimaryConnectionType", "s", NM_MANAGER_PRIMARY_CONNECTION_TYPE),
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("Metered", "u", NM_MANAGER_METERED),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("ActivatingConnection", "o", NM_MANAGER_ACTIVATING_CONNECTION),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("Startup", "b", NM_MANAGER_STARTUP),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("Version", "s", NM_MANAGER_VERSION),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("Capabilities", "u", NM_MANAGER_CAPABILITIES),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("State", "u", NM_MANAGER_STATE),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("Connectivity", "u", NM_MANAGER_CONNECTIVITY),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE_L ("ConnectivityCheckAvailable", "b", NM_MANAGER_CONNECTIVITY_CHECK_AVAILABLE),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE_L ("ConnectivityCheckEnabled", "b", NM_MANAGER_CONNECTIVITY_CHECK_ENABLED, NM_AUTH_PERMISSION_ENABLE_DISABLE_CONNECTIVITY_CHECK, NM_AUDIT_OP_NET_CONTROL),
|
|
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE_L ("GlobalDnsConfiguration", "a{sv}", NM_MANAGER_GLOBAL_DNS_CONFIGURATION, NM_AUTH_PERMISSION_SETTINGS_MODIFY_GLOBAL_DNS, NM_AUDIT_OP_NET_CONTROL),
|
|
|
|
|
),
|
|
|
|
|
),
|
|
|
|
|
.legacy_property_changed = TRUE,
|
|
|
|
|
};
|
|
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
static void
|
|
|
|
|
nm_manager_class_init (NMManagerClass *manager_class)
|
|
|
|
|
{
|
|
|
|
|
GObjectClass *object_class = G_OBJECT_CLASS (manager_class);
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
NMDBusObjectClass *dbus_object_class = NM_DBUS_OBJECT_CLASS (manager_class);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2018-03-13 10:14:06 +01:00
|
|
|
dbus_object_class->export_path = NM_DBUS_EXPORT_PATH_STATIC (NM_DBUS_PATH);
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
dbus_object_class->interface_infos = NM_DBUS_INTERFACE_INFOS (&interface_info_manager);
|
2015-04-03 10:08:52 -04:00
|
|
|
|
2016-01-18 14:18:50 +01:00
|
|
|
object_class->constructed = constructed;
|
2009-06-11 00:39:12 -04:00
|
|
|
object_class->set_property = set_property;
|
|
|
|
|
object_class->get_property = get_property;
|
|
|
|
|
object_class->dispose = dispose;
|
2016-09-23 13:24:10 +02:00
|
|
|
object_class->finalize = finalize;
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2016-04-01 17:34:51 +02:00
|
|
|
obj_properties[PROP_VERSION] =
|
|
|
|
|
g_param_spec_string (NM_MANAGER_VERSION, "", "",
|
|
|
|
|
NULL,
|
|
|
|
|
G_PARAM_READABLE |
|
|
|
|
|
G_PARAM_STATIC_STRINGS);
|
|
|
|
|
|
2016-09-15 23:34:24 +03:00
|
|
|
obj_properties[PROP_CAPABILITIES] =
|
        g_param_spec_variant (NM_MANAGER_CAPABILITIES, "", "",
                              G_VARIANT_TYPE ("au"),
                              NULL,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_STATE] =
        g_param_spec_uint (NM_MANAGER_STATE, "", "",
                           0, NM_STATE_DISCONNECTED, 0,
                           G_PARAM_READABLE |
                           G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_STARTUP] =
        g_param_spec_boolean (NM_MANAGER_STARTUP, "", "",
                              TRUE,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_NETWORKING_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_NETWORKING_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_WIRELESS_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_WIRELESS_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READWRITE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_WIRELESS_HARDWARE_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_WIRELESS_HARDWARE_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_WWAN_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_WWAN_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READWRITE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_WWAN_HARDWARE_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_WWAN_HARDWARE_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_WIMAX_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_WIMAX_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READWRITE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_WIMAX_HARDWARE_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_WIMAX_HARDWARE_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_ACTIVE_CONNECTIONS] =
        g_param_spec_boxed (NM_MANAGER_ACTIVE_CONNECTIONS, "", "",
                            G_TYPE_STRV,
                            G_PARAM_READABLE |
                            G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_CONNECTIVITY] =
        g_param_spec_uint (NM_MANAGER_CONNECTIVITY, "", "",
                           NM_CONNECTIVITY_UNKNOWN, NM_CONNECTIVITY_FULL, NM_CONNECTIVITY_UNKNOWN,
                           G_PARAM_READABLE |
                           G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_CONNECTIVITY_CHECK_AVAILABLE] =
        g_param_spec_boolean (NM_MANAGER_CONNECTIVITY_CHECK_AVAILABLE, "", "",
                              TRUE,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_CONNECTIVITY_CHECK_ENABLED] =
        g_param_spec_boolean (NM_MANAGER_CONNECTIVITY_CHECK_ENABLED, "", "",
                              TRUE,
                              G_PARAM_READWRITE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_PRIMARY_CONNECTION] =
        g_param_spec_string (NM_MANAGER_PRIMARY_CONNECTION, "", "",
                             NULL,
                             G_PARAM_READABLE |
                             G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_PRIMARY_CONNECTION_TYPE] =
        g_param_spec_string (NM_MANAGER_PRIMARY_CONNECTION_TYPE, "", "",
                             NULL,
                             G_PARAM_READABLE |
                             G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_ACTIVATING_CONNECTION] =
        g_param_spec_string (NM_MANAGER_ACTIVATING_CONNECTION, "", "",
                             NULL,
                             G_PARAM_READABLE |
                             G_PARAM_STATIC_STRINGS);

    /* Sleeping is not exported over D-Bus */
    obj_properties[PROP_SLEEPING] =
        g_param_spec_boolean (NM_MANAGER_SLEEPING, "", "",
                              FALSE,
                              G_PARAM_READABLE |
                              G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_DEVICES] =
        g_param_spec_boxed (NM_MANAGER_DEVICES, "", "",
                            G_TYPE_STRV,
                            G_PARAM_READABLE |
                            G_PARAM_STATIC_STRINGS);

    /**
     * NMManager:metered:
     *
     * Whether the connectivity is metered.
     *
     * Since: 1.2
     **/
    obj_properties[PROP_METERED] =
        g_param_spec_uint (NM_MANAGER_METERED, "", "",
                           0, G_MAXUINT32, NM_METERED_UNKNOWN,
                           G_PARAM_READABLE |
                           G_PARAM_STATIC_STRINGS);

    /**
     * NMManager:global-dns-configuration:
     *
     * The global DNS configuration.
     *
     * Since: 1.2
     **/
    obj_properties[PROP_GLOBAL_DNS_CONFIGURATION] =
        g_param_spec_variant (NM_MANAGER_GLOBAL_DNS_CONFIGURATION, "", "",
                              G_VARIANT_TYPE ("a{sv}"),
                              NULL,
                              G_PARAM_READWRITE |
                              G_PARAM_STATIC_STRINGS);

    /**
     * NMManager:all-devices:
     *
     * All devices, including those that are not realized.
     *
     * Since: 1.2
     **/
    obj_properties[PROP_ALL_DEVICES] =
        g_param_spec_boxed (NM_MANAGER_ALL_DEVICES, "", "",
                            G_TYPE_STRV,
                            G_PARAM_READABLE |
                            G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_CHECKPOINTS] =
        g_param_spec_boxed (NM_MANAGER_CHECKPOINTS, "", "",
                            G_TYPE_STRV,
                            G_PARAM_READABLE |
                            G_PARAM_STATIC_STRINGS);

    g_object_class_install_properties (object_class, _PROPERTY_ENUMS_LAST, obj_properties);

    /* signals */

    /* emitted only for realized devices */
    signals[DEVICE_ADDED] =
        g_signal_new (NM_MANAGER_DEVICE_ADDED,
                      G_OBJECT_CLASS_TYPE (object_class),
                      G_SIGNAL_RUN_FIRST,
                      0, NULL, NULL, NULL,
                      G_TYPE_NONE, 1, NM_TYPE_DEVICE);

    /* Emitted for both realized devices and placeholder devices */
    signals[INTERNAL_DEVICE_ADDED] =
        g_signal_new (NM_MANAGER_INTERNAL_DEVICE_ADDED,
                      G_OBJECT_CLASS_TYPE (object_class),
                      G_SIGNAL_RUN_FIRST, 0,
                      NULL, NULL, NULL,
                      G_TYPE_NONE, 1, G_TYPE_OBJECT);

    /* emitted only for realized devices when a device
     * becomes unrealized or removed */
    signals[DEVICE_REMOVED] =
        g_signal_new (NM_MANAGER_DEVICE_REMOVED,
                      G_OBJECT_CLASS_TYPE (object_class),
                      G_SIGNAL_RUN_FIRST,
                      0, NULL, NULL, NULL,
                      G_TYPE_NONE, 1, NM_TYPE_DEVICE);

    /* Emitted for both realized devices and placeholder devices */
    signals[INTERNAL_DEVICE_REMOVED] =
        g_signal_new (NM_MANAGER_INTERNAL_DEVICE_REMOVED,
                      G_OBJECT_CLASS_TYPE (object_class),
                      G_SIGNAL_RUN_FIRST, 0,
                      NULL, NULL, NULL,
                      G_TYPE_NONE, 1, G_TYPE_OBJECT);

    signals[ACTIVE_CONNECTION_ADDED] =
        g_signal_new (NM_MANAGER_ACTIVE_CONNECTION_ADDED,
                      G_OBJECT_CLASS_TYPE (object_class),
                      G_SIGNAL_RUN_FIRST,
                      0, NULL, NULL, NULL,
                      G_TYPE_NONE, 1, NM_TYPE_ACTIVE_CONNECTION);

    signals[ACTIVE_CONNECTION_REMOVED] =
        g_signal_new (NM_MANAGER_ACTIVE_CONNECTION_REMOVED,
                      G_OBJECT_CLASS_TYPE (object_class),
                      G_SIGNAL_RUN_FIRST,
                      0, NULL, NULL, NULL,
                      G_TYPE_NONE, 1, NM_TYPE_ACTIVE_CONNECTION);

    signals[CONFIGURE_QUIT] =
        g_signal_new (NM_MANAGER_CONFIGURE_QUIT,
                      G_OBJECT_CLASS_TYPE (object_class),
                      G_SIGNAL_RUN_FIRST,
                      0, NULL, NULL, NULL,
                      G_TYPE_NONE, 0);
}
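For context, this is roughly how a consumer inside the daemon would subscribe
to one of the signals registered above (a minimal usage sketch; the callback
and function names are illustrative, not taken from this file):

static void
device_added_cb (NMManager *manager, NMDevice *device, gpointer user_data)
{
    /* react to a newly added (realized) device */
}

static void
watch_manager (NMManager *manager, gpointer user_data)
{
    /* NM_MANAGER_DEVICE_ADDED is the "device-added" signal registered
     * in the class_init code above with one NMDevice argument */
    g_signal_connect (manager,
                      NM_MANAGER_DEVICE_ADDED,
                      G_CALLBACK (device_added_cb),
                      user_data);
}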