NetworkManager/libnm/nm-client.h

// SPDX-License-Identifier: LGPL-2.1+
/*
 * Copyright (C) 2007 - 2008 Novell, Inc.
 * Copyright (C) 2007 - 2014 Red Hat, Inc.
 */
#ifndef __NM_CLIENT_H__
#define __NM_CLIENT_H__

#if !defined (__NETWORKMANAGER_H_INSIDE__) && !defined (NETWORKMANAGER_COMPILATION)
#error "Only <NetworkManager.h> can be included directly."
#endif

#include "nm-types.h"

G_BEGIN_DECLS
/**
 * NMClientInstanceFlags:
 * @NM_CLIENT_INSTANCE_FLAGS_NONE: special value to indicate no flags.
 * @NM_CLIENT_INSTANCE_FLAGS_NO_AUTO_FETCH_PERMISSIONS: by default, NMClient
 *   fetches the permissions via "GetPermissions" and refetches them when the
 *   "CheckPermissions" signal is received. Setting this flag disables that
 *   behavior. The flag can be toggled at any time to enable or disable the
 *   automatic fetching of permissions. See also
 *   nm_client_get_permissions_state() to know whether the permissions are
 *   up to date.
 *
 * Since: 1.24
 */
typedef enum { /*< flags >*/
    NM_CLIENT_INSTANCE_FLAGS_NONE                      = 0,
    NM_CLIENT_INSTANCE_FLAGS_NO_AUTO_FETCH_PERMISSIONS = 1,
} NMClientInstanceFlags;
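
/*
 * A minimal sketch of toggling this flag on an existing client. This assumes
 * the "instance-flags" property accepts the flag values as a guint (hedged
 * assumption; check the property's declared type in your libnm version):
 *
 *   NMClient *client;
 *   GError *error = NULL;
 *
 *   client = nm_client_new (NULL, &error);
 *   if (client) {
 *       // stop refetching permissions on the "CheckPermissions" signal
 *       g_object_set (client,
 *                     NM_CLIENT_INSTANCE_FLAGS,
 *                     (guint) NM_CLIENT_INSTANCE_FLAGS_NO_AUTO_FETCH_PERMISSIONS,
 *                     NULL);
 *   }
 */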
#define NM_TYPE_CLIENT            (nm_client_get_type ())
#define NM_CLIENT(obj)            (G_TYPE_CHECK_INSTANCE_CAST ((obj), NM_TYPE_CLIENT, NMClient))
#define NM_CLIENT_CLASS(klass)    (G_TYPE_CHECK_CLASS_CAST ((klass), NM_TYPE_CLIENT, NMClientClass))
#define NM_IS_CLIENT(obj)         (G_TYPE_CHECK_INSTANCE_TYPE ((obj), NM_TYPE_CLIENT))
#define NM_IS_CLIENT_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), NM_TYPE_CLIENT))
#define NM_CLIENT_GET_CLASS(obj)  (G_TYPE_INSTANCE_GET_CLASS ((obj), NM_TYPE_CLIENT, NMClientClass))
#define NM_CLIENT_VERSION "version"
#define NM_CLIENT_STATE "state"
#define NM_CLIENT_STARTUP "startup"
#define NM_CLIENT_NM_RUNNING "nm-running"
#define NM_CLIENT_DBUS_CONNECTION "dbus-connection"
#define NM_CLIENT_DBUS_NAME_OWNER "dbus-name-owner"
#define NM_CLIENT_INSTANCE_FLAGS "instance-flags"
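
/*
 * The names above are GObject property names and can be used to watch for
 * changes via "notify::" signals. A minimal sketch (assuming a ready
 * NMClient instance named `client`):
 *
 *   static void
 *   on_state_changed (GObject *object, GParamSpec *pspec, gpointer user_data)
 *   {
 *       g_print ("NetworkManager state: %d\n",
 *                (int) nm_client_get_state (NM_CLIENT (object)));
 *   }
 *
 *   g_signal_connect (client, "notify::" NM_CLIENT_STATE,
 *                     G_CALLBACK (on_state_changed), NULL);
 */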
/* The properties below have synchronous setters that perform blocking D-Bus
 * calls. D-Bus is fundamentally asynchronous, and NMClient is essentially a
 * client-side cache of D-Bus objects that is filled exclusively by
 * (asynchronous) D-Bus events. A blocking call therefore returns before the
 * resulting PropertiesChanged signal is processed: the NMClient state is
 * still unchanged when the call returns. For that reason the synchronous
 * calls are marked deprecated; they will not be removed, but the
 * asynchronous API or a direct g_dbus_connection_call() is preferable. See
 * https://smcv.pseudorandom.co.uk/2008/11/nonblocking/ on why blocking D-Bus
 * calls are problematic. */
_NM_DEPRECATED_SYNC_WRITABLE_PROPERTY
#define NM_CLIENT_NETWORKING_ENABLED "networking-enabled"
_NM_DEPRECATED_SYNC_WRITABLE_PROPERTY
#define NM_CLIENT_WIRELESS_ENABLED "wireless-enabled"
_NM_DEPRECATED_SYNC_WRITABLE_PROPERTY
#define NM_CLIENT_WWAN_ENABLED "wwan-enabled"
_NM_DEPRECATED_SYNC_WRITABLE_PROPERTY
#define NM_CLIENT_WIMAX_ENABLED "wimax-enabled"
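
/*
 * Instead of the deprecated blocking setters for the writable properties
 * above (e.g. nm_client_wireless_set_enabled()), state can be changed
 * asynchronously so the NMClient cache stays consistent with the ordered
 * stream of D-Bus events. A sketch using nm_client_dbus_set_property();
 * NM_DBUS_PATH and NM_DBUS_INTERFACE come from nm-dbus-interface.h, and the
 * D-Bus property name "WirelessEnabled" is an assumption here:
 *
 *   static void
 *   set_wireless_done (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *
 *       if (!nm_client_dbus_set_property_finish (NM_CLIENT (source), result, &error)) {
 *           g_warning ("enabling wireless failed: %s", error->message);
 *           g_error_free (error);
 *       }
 *   }
 *
 *   nm_client_dbus_set_property (client,
 *                                NM_DBUS_PATH,
 *                                NM_DBUS_INTERFACE,
 *                                "WirelessEnabled",
 *                                g_variant_new_boolean (TRUE),
 *                                -1, NULL, set_wireless_done, NULL);
 */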
#define NM_CLIENT_WIRELESS_HARDWARE_ENABLED "wireless-hardware-enabled"
#define NM_CLIENT_WWAN_HARDWARE_ENABLED "wwan-hardware-enabled"
#define NM_CLIENT_WIMAX_HARDWARE_ENABLED "wimax-hardware-enabled"
#define NM_CLIENT_ACTIVE_CONNECTIONS "active-connections"
#define NM_CLIENT_CONNECTIVITY "connectivity"
#define NM_CLIENT_CONNECTIVITY_CHECK_URI "connectivity-check-uri"
#define NM_CLIENT_CONNECTIVITY_CHECK_AVAILABLE "connectivity-check-available"
_NM_DEPRECATED_SYNC_WRITABLE_PROPERTY
#define NM_CLIENT_CONNECTIVITY_CHECK_ENABLED "connectivity-check-enabled"
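
/*
 * Connectivity state can be read from the client-side cache without
 * blocking, or refreshed with the asynchronous check. A sketch (the
 * blocking nm_client_check_connectivity() counterpart is among the
 * deprecated calls):
 *
 *   static void
 *   check_done (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *       NMConnectivityState st;
 *
 *       st = nm_client_check_connectivity_finish (NM_CLIENT (source), result, &error);
 *       if (error) {
 *           g_warning ("connectivity check failed: %s", error->message);
 *           g_error_free (error);
 *       } else
 *           g_print ("connectivity: %d\n", (int) st);
 *   }
 *
 *   // cached value, never blocks:
 *   NMConnectivityState state = nm_client_get_connectivity (client);
 *
 *   // request a fresh check asynchronously:
 *   nm_client_check_connectivity_async (client, NULL, check_done, NULL);
 */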
#define NM_CLIENT_PRIMARY_CONNECTION "primary-connection"
#define NM_CLIENT_ACTIVATING_CONNECTION "activating-connection"
#define NM_CLIENT_DEVICES "devices"
#define NM_CLIENT_ALL_DEVICES "all-devices"
#define NM_CLIENT_CONNECTIONS "connections"
#define NM_CLIENT_HOSTNAME "hostname"
#define NM_CLIENT_CAN_MODIFY "can-modify"
#define NM_CLIENT_METERED "metered"
#define NM_CLIENT_DNS_MODE "dns-mode"
#define NM_CLIENT_DNS_RC_MANAGER "dns-rc-manager"
#define NM_CLIENT_DNS_CONFIGURATION "dns-configuration"
#define NM_CLIENT_CHECKPOINTS "checkpoints"
#define NM_CLIENT_CAPABILITIES "capabilities"
#define NM_CLIENT_PERMISSIONS_STATE "permissions-state"
#define NM_CLIENT_DEVICE_ADDED "device-added"
#define NM_CLIENT_DEVICE_REMOVED "device-removed"
#define NM_CLIENT_ANY_DEVICE_ADDED "any-device-added"
#define NM_CLIENT_ANY_DEVICE_REMOVED "any-device-removed"
#define NM_CLIENT_PERMISSION_CHANGED "permission-changed"
#define NM_CLIENT_CONNECTION_ADDED "connection-added"
#define NM_CLIENT_CONNECTION_REMOVED "connection-removed"
#define NM_CLIENT_ACTIVE_CONNECTION_ADDED "active-connection-added"
#define NM_CLIENT_ACTIVE_CONNECTION_REMOVED "active-connection-removed"
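/*
 * Example (illustrative sketch, not part of the original header): the
 * signal-name defines above are meant to be used with g_signal_connect().
 * The callback name device_added_cb() is a placeholder.
 *
 * |[<!-- language="C" -->
 * static void
 * device_added_cb (NMClient *client, NMDevice *device, gpointer user_data)
 * {
 *     g_print ("device added: %s\n", nm_device_get_iface (device));
 * }
 *
 * g_signal_connect (client, NM_CLIENT_DEVICE_ADDED,
 *                   G_CALLBACK (device_added_cb), NULL);
 * ]|
 */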
/**
 * NMClientError:
 * @NM_CLIENT_ERROR_FAILED: unknown or unclassified error
 * @NM_CLIENT_ERROR_MANAGER_NOT_RUNNING: an operation that requires NetworkManager
 *   failed because NetworkManager is not running
 * @NM_CLIENT_ERROR_OBJECT_CREATION_FAILED: NetworkManager claimed that an
 *   operation succeeded, but the object that was allegedly created (e.g.,
 *   #NMRemoteConnection, #NMActiveConnection) was apparently destroyed before
 *   #NMClient could create a representation of it.
 *
 * Describes errors that may result from operations involving a #NMClient.
 *
 * D-Bus operations may also return errors from other domains, including
 * #NMManagerError, #NMSettingsError, #NMAgentManagerError, and #NMConnectionError.
 **/
typedef enum {
	NM_CLIENT_ERROR_FAILED = 0,
	NM_CLIENT_ERROR_MANAGER_NOT_RUNNING,
	NM_CLIENT_ERROR_OBJECT_CREATION_FAILED,
} NMClientError;
#define NM_CLIENT_ERROR nm_client_error_quark ()
GQuark nm_client_error_quark (void);
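/*
 * Example (sketch): matching a #NMClient error in an asynchronous callback,
 * where `error` was filled in by some _finish() function.
 *
 * |[<!-- language="C" -->
 * if (g_error_matches (error, NM_CLIENT_ERROR,
 *                      NM_CLIENT_ERROR_MANAGER_NOT_RUNNING))
 *     g_warning ("NetworkManager is not running");
 * ]|
 */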
/* DNS stuff */
typedef struct NMDnsEntry NMDnsEntry;
NM_AVAILABLE_IN_1_6
GType nm_dns_entry_get_type (void);
NM_AVAILABLE_IN_1_6
void nm_dns_entry_unref (NMDnsEntry *entry);
NM_AVAILABLE_IN_1_6
const char * nm_dns_entry_get_interface (NMDnsEntry *entry);
NM_AVAILABLE_IN_1_6
const char * const *nm_dns_entry_get_nameservers (NMDnsEntry *entry);
NM_AVAILABLE_IN_1_6
const char * const *nm_dns_entry_get_domains (NMDnsEntry *entry);
NM_AVAILABLE_IN_1_6
int nm_dns_entry_get_priority (NMDnsEntry *entry);
NM_AVAILABLE_IN_1_6
gboolean nm_dns_entry_get_vpn (NMDnsEntry *entry);
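/*
 * Example (sketch): iterating the DNS configuration with the accessors
 * above. This assumes nm_client_get_dns_configuration() (declared elsewhere
 * in this header), which returns a #GPtrArray of #NMDnsEntry.
 *
 * |[<!-- language="C" -->
 * const GPtrArray *entries = nm_client_get_dns_configuration (client);
 * guint i;
 *
 * for (i = 0; entries && i < entries->len; i++) {
 *     NMDnsEntry *entry = g_ptr_array_index (entries, i);
 *     const char *const*ns = nm_dns_entry_get_nameservers (entry);
 *
 *     for (; ns && *ns; ns++)
 *         g_print ("%s\n", *ns);
 * }
 * ]|
 */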
/**
 * NMClient:
 */
typedef struct _NMClientClass NMClientClass;
GType nm_client_get_type (void);
NMClient *nm_client_new (GCancellable *cancellable,
                         GError **error);

void nm_client_new_async (GCancellable *cancellable,
                          GAsyncReadyCallback callback,
                          gpointer user_data);

NMClient *nm_client_new_finish (GAsyncResult *result,
                                GError **error);
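/*
 * Example (sketch): asynchronous construction, which avoids blocking the
 * caller. Assumes a running GMainLoop on the thread-default GMainContext;
 * got_client_cb() is a placeholder name.
 *
 * |[<!-- language="C" -->
 * static void
 * got_client_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 * {
 *     GError *error = NULL;
 *     NMClient *client = nm_client_new_finish (result, &error);
 *
 *     if (!client) {
 *         g_warning ("failed to create NMClient: %s", error->message);
 *         g_error_free (error);
 *         return;
 *     }
 *     g_print ("NMClient ready\n");
 * }
 *
 * nm_client_new_async (NULL, got_client_cb, NULL);
 * ]|
 */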
NM_AVAILABLE_IN_1_24
NMClientInstanceFlags nm_client_get_instance_flags (NMClient *self);
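/*
 * Example (sketch, an assumption based on NM_CLIENT_INSTANCE_FLAGS being a
 * writable property, not verified documentation): toggling automatic
 * permission fetching via g_object_set().
 *
 * |[<!-- language="C" -->
 * g_object_set (client,
 *               NM_CLIENT_INSTANCE_FLAGS,
 *               (guint) NM_CLIENT_INSTANCE_FLAGS_NO_AUTO_FETCH_PERMISSIONS,
 *               NULL);
 * ]|
 */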
NM_AVAILABLE_IN_1_22
GDBusConnection *nm_client_get_dbus_connection (NMClient *client);
NM_AVAILABLE_IN_1_22
GMainContext *nm_client_get_main_context (NMClient *self);
NM_AVAILABLE_IN_1_22
GObject *nm_client_get_context_busy_watcher (NMClient *self);
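/*
 * Example (sketch): the context-busy-watcher tells when the client's
 * GMainContext no longer needs to be iterated. After dropping the last
 * reference to the client, pending (cancelled) D-Bus calls still hold
 * resources until their callbacks have run; iterating until the watcher
 * is gone reclaims them.
 *
 * |[<!-- language="C" -->
 * GMainContext *context = g_main_context_ref (nm_client_get_main_context (client));
 * gpointer watcher = nm_client_get_context_busy_watcher (client);
 *
 * g_object_add_weak_pointer (G_OBJECT (watcher), &watcher);
 * g_object_unref (client);
 * while (watcher)
 *     g_main_context_iteration (context, TRUE);
 * g_main_context_unref (context);
 * ]|
 */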
NM_AVAILABLE_IN_1_22
const char *nm_client_get_dbus_name_owner (NMClient *client);
const char *nm_client_get_version (NMClient *client);
NMState nm_client_get_state (NMClient *client);
gboolean nm_client_get_startup (NMClient *client);
gboolean nm_client_get_nm_running (NMClient *client);
NMObject *nm_client_get_object_by_path (NMClient *client,
                                        const char *dbus_path);
NM_AVAILABLE_IN_1_22
NMMetered nm_client_get_metered (NMClient *client);
gboolean nm_client_networking_get_enabled (NMClient *client);
NM_AVAILABLE_IN_1_24
const guint32 *nm_client_get_capabilities (NMClient *client,
                                           gsize *length);
_NM_DEPRECATED_SYNC_METHOD
gboolean nm_client_networking_set_enabled (NMClient *client,
                                           gboolean enabled,
                                           GError **error);
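/*
 * Example (sketch): an asynchronous alternative to the blocking setter
 * above, calling the manager's Enable() D-Bus method directly through
 * GDBusConnection. enable_done_cb() is a placeholder callback (it may
 * also be NULL for fire-and-forget).
 *
 * |[<!-- language="C" -->
 * g_dbus_connection_call (nm_client_get_dbus_connection (client),
 *                         "org.freedesktop.NetworkManager",
 *                         "/org/freedesktop/NetworkManager",
 *                         "org.freedesktop.NetworkManager",
 *                         "Enable",
 *                         g_variant_new ("(b)", TRUE),
 *                         NULL,
 *                         G_DBUS_CALL_FLAGS_NONE,
 *                         -1,
 *                         NULL,
 *                         enable_done_cb,
 *                         NULL);
 * ]|
 */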
gboolean nm_client_wireless_get_enabled (NMClient *client);
_NM_DEPRECATED_SYNC_METHOD
void nm_client_wireless_set_enabled (NMClient *client, gboolean enabled);
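/*
 * Example (sketch): similarly, a writable manager property such as
 * WirelessEnabled can be set asynchronously via the standard
 * org.freedesktop.DBus.Properties interface instead of the blocking
 * setter above (fire-and-forget here, callback omitted).
 *
 * |[<!-- language="C" -->
 * g_dbus_connection_call (nm_client_get_dbus_connection (client),
 *                         "org.freedesktop.NetworkManager",
 *                         "/org/freedesktop/NetworkManager",
 *                         "org.freedesktop.DBus.Properties",
 *                         "Set",
 *                         g_variant_new ("(ssv)",
 *                                        "org.freedesktop.NetworkManager",
 *                                        "WirelessEnabled",
 *                                        g_variant_new_boolean (TRUE)),
 *                         NULL,
 *                         G_DBUS_CALL_FLAGS_NONE,
 *                         -1,
 *                         NULL,
 *                         NULL,
 *                         NULL);
 * ]|
 */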
gboolean nm_client_wireless_hardware_get_enabled (NMClient *client);
gboolean nm_client_wwan_get_enabled (NMClient *client);
_NM_DEPRECATED_SYNC_METHOD
void nm_client_wwan_set_enabled (NMClient *client, gboolean enabled);
gboolean nm_client_wwan_hardware_get_enabled (NMClient *client);
NM_DEPRECATED_IN_1_22
gboolean nm_client_wimax_get_enabled (NMClient *client);
NM_DEPRECATED_IN_1_22
_NM_DEPRECATED_SYNC_METHOD
void nm_client_wimax_set_enabled (NMClient *client, gboolean enabled);
NM_DEPRECATED_IN_1_22
gboolean nm_client_wimax_hardware_get_enabled (NMClient *client);
NM_AVAILABLE_IN_1_10
gboolean nm_client_connectivity_check_get_available (NMClient *client);
NM_AVAILABLE_IN_1_10
gboolean nm_client_connectivity_check_get_enabled (NMClient *client);
NM_AVAILABLE_IN_1_10
_NM_DEPRECATED_SYNC_METHOD
void nm_client_connectivity_check_set_enabled (NMClient *client,
                                               gboolean enabled);
NM_AVAILABLE_IN_1_20
const char *nm_client_connectivity_check_get_uri (NMClient *client);
_NM_DEPRECATED_SYNC_METHOD
gboolean nm_client_get_logging (NMClient *client,
                                char **level,
                                char **domains,
                                GError **error);
_NM_DEPRECATED_SYNC_METHOD
gboolean nm_client_set_logging (NMClient *client,
                                const char *level,
                                const char *domains,
                                GError **error);
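
/* Example (illustrative sketch, not part of the header proper): querying the
 * daemon's logging configuration with the deprecated blocking getter. Assumes
 * an NMClient "client" created with nm_client_new(); the local variable names
 * are made up.
 *
 *   char *level = NULL;
 *   char *domains = NULL;
 *   GError *error = NULL;
 *
 *   if (nm_client_get_logging (client, &level, &domains, &error)) {
 *       g_print ("logging: level=%s, domains=%s\n", level, domains);
 *       g_free (level);
 *       g_free (domains);
 *   } else {
 *       g_warning ("GetLogging failed: %s", error->message);
 *       g_error_free (error);
 *   }
 */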
NMClientPermissionResult nm_client_get_permission_result (NMClient *client,
                                                          NMClientPermission permission);
NM_AVAILABLE_IN_1_24
NMTernary nm_client_get_permissions_state (NMClient *self);
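
/* Example (illustrative sketch): checking a cached permission. The result is
 * only meaningful once the permissions were fetched; watch
 * nm_client_get_permissions_state() for that. The permission constant is an
 * existing NMClientPermission value, chosen arbitrarily for the example.
 *
 *   if (nm_client_get_permission_result (client, NM_CLIENT_PERMISSION_ENABLE_DISABLE_WIFI)
 *       == NM_CLIENT_PERMISSION_RESULT_YES)
 *       g_print ("allowed to toggle Wi-Fi\n");
 */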
NMConnectivityState nm_client_get_connectivity (NMClient *client);
_NM_DEPRECATED_SYNC_METHOD
NM_DEPRECATED_IN_1_22
NMConnectivityState nm_client_check_connectivity (NMClient *client,
                                                  GCancellable *cancellable,
                                                  GError **error);
void nm_client_check_connectivity_async (NMClient *client,
                                         GCancellable *cancellable,
                                         GAsyncReadyCallback callback,
                                         gpointer user_data);
NMConnectivityState nm_client_check_connectivity_finish (NMClient *client,
                                                         GAsyncResult *result,
                                                         GError **error);
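
/* Example (illustrative sketch): the asynchronous connectivity check, the
 * recommended replacement for the deprecated blocking variant above. Assumes
 * an iterating GMainLoop; the callback name is made up.
 *
 *   static void
 *   connectivity_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *       NMConnectivityState state;
 *
 *       state = nm_client_check_connectivity_finish (NM_CLIENT (source), result, &error);
 *       if (error) {
 *           g_warning ("CheckConnectivity failed: %s", error->message);
 *           g_error_free (error);
 *           return;
 *       }
 *       g_print ("connectivity state: %d\n", (int) state);
 *   }
 *
 *   nm_client_check_connectivity_async (client, NULL, connectivity_cb, NULL);
 */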
_NM_DEPRECATED_SYNC_METHOD
gboolean nm_client_save_hostname (NMClient *client,
                                  const char *hostname,
                                  GCancellable *cancellable,
                                  GError **error);
void nm_client_save_hostname_async (NMClient *client,
                                    const char *hostname,
                                    GCancellable *cancellable,
                                    GAsyncReadyCallback callback,
                                    gpointer user_data);
gboolean nm_client_save_hostname_finish (NMClient *client,
                                         GAsyncResult *result,
                                         GError **error);
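
/* Example (illustrative sketch): setting the persistent hostname without
 * blocking. The callback name is made up; error handling follows the usual
 * GAsyncResult pattern.
 *
 *   static void
 *   hostname_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *
 *       if (!nm_client_save_hostname_finish (NM_CLIENT (source), result, &error)) {
 *           g_warning ("SaveHostname failed: %s", error->message);
 *           g_error_free (error);
 *       }
 *   }
 *
 *   nm_client_save_hostname_async (client, "new-hostname", NULL, hostname_cb, NULL);
 */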
/* Devices */
const GPtrArray *nm_client_get_devices (NMClient *client);
NM_AVAILABLE_IN_1_2
const GPtrArray *nm_client_get_all_devices (NMClient *client);
NMDevice *nm_client_get_device_by_path (NMClient *client, const char *object_path);
NMDevice *nm_client_get_device_by_iface (NMClient *client, const char *iface);
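
/* Example (illustrative sketch): iterating the cached device list. The
 * returned array is owned by the NMClient cache and must not be modified or
 * freed; its content can change whenever the client's GMainContext is
 * iterated.
 *
 *   const GPtrArray *devices = nm_client_get_devices (client);
 *   guint i;
 *
 *   for (i = 0; i < devices->len; i++) {
 *       NMDevice *device = g_ptr_array_index (devices, i);
 *
 *       g_print ("device: %s\n", nm_device_get_iface (device));
 *   }
 */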
/* Active Connections */
const GPtrArray *nm_client_get_active_connections (NMClient *client);
NMActiveConnection *nm_client_get_primary_connection (NMClient *client);
NMActiveConnection *nm_client_get_activating_connection (NMClient *client);
void nm_client_activate_connection_async (NMClient *client,
                                          NMConnection *connection,
                                          NMDevice *device,
                                          const char *specific_object,
                                          GCancellable *cancellable,
                                          GAsyncReadyCallback callback,
                                          gpointer user_data);
NMActiveConnection *nm_client_activate_connection_finish (NMClient *client,
                                                          GAsyncResult *result,
                                                          GError **error);
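
/* Example (illustrative sketch): activating an existing profile looked up by
 * id. The profile name is a placeholder; device and specific_object are NULL
 * to let the daemon choose, and the callback name is made up.
 *
 *   static void
 *   activated_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *       NMActiveConnection *ac;
 *
 *       ac = nm_client_activate_connection_finish (NM_CLIENT (source), result, &error);
 *       if (!ac) {
 *           g_warning ("activation failed: %s", error->message);
 *           g_error_free (error);
 *           return;
 *       }
 *       g_object_unref (ac);
 *   }
 *
 *   NMRemoteConnection *rc = nm_client_get_connection_by_id (client, "my-profile");
 *
 *   if (rc)
 *       nm_client_activate_connection_async (client, NM_CONNECTION (rc),
 *                                            NULL, NULL, NULL, activated_cb, NULL);
 */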
void nm_client_add_and_activate_connection_async (NMClient *client,
                                                  NMConnection *partial,
                                                  NMDevice *device,
                                                  const char *specific_object,
                                                  GCancellable *cancellable,
                                                  GAsyncReadyCallback callback,
                                                  gpointer user_data);
NMActiveConnection *nm_client_add_and_activate_connection_finish (NMClient *client,
                                                                  GAsyncResult *result,
                                                                  GError **error);
NM_AVAILABLE_IN_1_16
void nm_client_add_and_activate_connection2 (NMClient *client,
                                             NMConnection *partial,
                                             NMDevice *device,
                                             const char *specific_object,
                                             GVariant *options,
                                             GCancellable *cancellable,
                                             GAsyncReadyCallback callback,
                                             gpointer user_data);
NM_AVAILABLE_IN_1_16
NMActiveConnection *nm_client_add_and_activate_connection2_finish (NMClient *client,
                                                                   GAsyncResult *result,
                                                                   GVariant **out_result,
                                                                   GError **error);
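
/* Example (illustrative sketch): AddAndActivate2 with an options dictionary.
 * The "persist" option ("volatile", "memory" or "disk") is assumed here as
 * one supported key; consult the D-Bus API documentation for the full set.
 * "partial", "device" and "added_cb" are made-up names from the surrounding
 * code.
 *
 *   GVariantBuilder options;
 *
 *   g_variant_builder_init (&options, G_VARIANT_TYPE ("a{sv}"));
 *   g_variant_builder_add (&options, "{sv}", "persist", g_variant_new_string ("volatile"));
 *   nm_client_add_and_activate_connection2 (client, partial, device, NULL,
 *                                           g_variant_builder_end (&options),
 *                                           NULL, added_cb, NULL);
 *
 * The corresponding finish function additionally returns the output a{sv}
 * dictionary via out_result; pass NULL there if the result is not needed.
 */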
_NM_DEPRECATED_SYNC_METHOD
gboolean nm_client_deactivate_connection (NMClient *client,
                                          NMActiveConnection *active,
                                          GCancellable *cancellable,
                                          GError **error);
void nm_client_deactivate_connection_async (NMClient *client,
NMActiveConnection *active,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
gboolean nm_client_deactivate_connection_finish (NMClient *client,
GAsyncResult *result,
GError **error);
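/* A minimal usage sketch of the async/finish pattern that these declarations
 * follow (the names "client", "active" and deactivated_cb are illustrative
 * assumptions, not part of this header):
 *
 *   static void
 *   deactivated_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *
 *       if (!nm_client_deactivate_connection_finish (NM_CLIENT (source), result, &error)) {
 *           g_warning ("deactivating failed: %s", error->message);
 *           g_error_free (error);
 *       }
 *   }
 *
 *   nm_client_deactivate_connection_async (client, active, NULL, deactivated_cb, NULL);
 */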
/* Connections */
const GPtrArray *nm_client_get_connections (NMClient *client);
NMRemoteConnection *nm_client_get_connection_by_id (NMClient *client, const char *id);
NMRemoteConnection *nm_client_get_connection_by_path (NMClient *client, const char *path);
NMRemoteConnection *nm_client_get_connection_by_uuid (NMClient *client, const char *uuid);
void nm_client_add_connection_async (NMClient *client,
NMConnection *connection,
gboolean save_to_disk,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NMRemoteConnection *nm_client_add_connection_finish (NMClient *client,
GAsyncResult *result,
GError **error);
/* The AddConnection2() D-Bus method makes it possible to add a profile with
 * autoconnect blocked from the start. Update2() has a
 * %NM_SETTINGS_UPDATE2_FLAG_BLOCK_AUTOCONNECT flag to block autoconnect, and
 * since the existing AddConnection() and AddConnectionUnsaved() API is not
 * extensible, AddConnection2() adds flags and room for additional arguments,
 * including %NM_SETTINGS_ADD_CONNECTION2_FLAG_BLOCK_AUTOCONNECT.
 *
 * nm_client_add_connection2() can completely replace
 * nm_client_add_connection_async(): where possible, it automatically prefers
 * to call the AddConnection() and AddConnectionUnsaved() D-Bus methods, in
 * order to work with server versions older than 1.20 (for example, after a
 * package upgrade while the running NetworkManager is still older than the
 * installed libnm). Since nm_client_add_connection2_finish() also has a
 * result output, the caller must decide whether to care about that result:
 * passing TRUE for ignore_out_result allows the fallback to the old API.
 * This way, the new function can fully replace the old one, and the caller
 * does not need to switch between start/finish method pairs.
 *
 * https://bugzilla.redhat.com/show_bug.cgi?id=1677068 */
NM_AVAILABLE_IN_1_20
void nm_client_add_connection2 (NMClient *client,
GVariant *settings,
NMSettingsAddConnection2Flags flags,
GVariant *args,
gboolean ignore_out_result,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_20
NMRemoteConnection *nm_client_add_connection2_finish (NMClient *client,
GAsyncResult *result,
GVariant **out_result,
GError **error);
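/* A hedged example of nm_client_add_connection2(), assuming an existing
 * NMClient *client and a populated NMConnection *conn (names and error
 * handling are illustrative only):
 *
 *   static void
 *   added_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *       NMRemoteConnection *remote;
 *
 *       remote = nm_client_add_connection2_finish (NM_CLIENT (source), result, NULL, &error);
 *       if (!remote) {
 *           g_warning ("adding connection failed: %s", error->message);
 *           g_error_free (error);
 *       } else
 *           g_object_unref (remote);
 *   }
 *
 *   nm_client_add_connection2 (client,
 *                              nm_connection_to_dbus (conn, NM_CONNECTION_SERIALIZE_ALL),
 *                              NM_SETTINGS_ADD_CONNECTION2_FLAG_TO_DISK
 *                                | NM_SETTINGS_ADD_CONNECTION2_FLAG_BLOCK_AUTOCONNECT,
 *                              NULL, TRUE, NULL, added_cb, NULL);
 */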
_NM_DEPRECATED_SYNC_METHOD
gboolean nm_client_load_connections (NMClient *client,
char **filenames,
char ***failures,
GCancellable *cancellable,
GError **error);
void nm_client_load_connections_async (NMClient *client,
char **filenames,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
gboolean nm_client_load_connections_finish (NMClient *client,
char ***failures,
GAsyncResult *result,
GError **error);
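/* A sketch of the asynchronous variant, the preferred alternative to the
 * deprecated synchronous call above (the filename and callback name are
 * assumptions for illustration). Note that in the finish function, the
 * "failures" output comes before the GAsyncResult:
 *
 *   static void
 *   loaded_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *       char **failures = NULL;
 *
 *       if (!nm_client_load_connections_finish (NM_CLIENT (source), &failures, result, &error)) {
 *           g_warning ("loading failed: %s", error->message);
 *           g_error_free (error);
 *       }
 *       g_strfreev (failures);
 *   }
 *
 *   char *filenames[] = { "/etc/NetworkManager/system-connections/example.nmconnection", NULL };
 *
 *   nm_client_load_connections_async (client, filenames, NULL, loaded_cb, NULL);
 */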
_NM_DEPRECATED_SYNC_METHOD
gboolean nm_client_reload_connections (NMClient *client,
GCancellable *cancellable,
GError **error);
void nm_client_reload_connections_async (NMClient *client,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
gboolean nm_client_reload_connections_finish (NMClient *client,
GAsyncResult *result,
GError **error);
NM_AVAILABLE_IN_1_6
const char *nm_client_get_dns_mode (NMClient *client);
NM_AVAILABLE_IN_1_6
const char *nm_client_get_dns_rc_manager (NMClient *client);
NM_AVAILABLE_IN_1_6
const GPtrArray *nm_client_get_dns_configuration (NMClient *client);
NM_AVAILABLE_IN_1_12
const GPtrArray *nm_client_get_checkpoints (NMClient *client);
NM_AVAILABLE_IN_1_12
void nm_client_checkpoint_create (NMClient *client,
const GPtrArray *devices,
guint32 rollback_timeout,
NMCheckpointCreateFlags flags,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_12
NMCheckpoint *nm_client_checkpoint_create_finish (NMClient *client,
GAsyncResult *result,
GError **error);
NM_AVAILABLE_IN_1_12
void nm_client_checkpoint_destroy (NMClient *client,
const char *checkpoint_path,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_12
gboolean nm_client_checkpoint_destroy_finish (NMClient *client,
GAsyncResult *result,
GError **error);
NM_AVAILABLE_IN_1_12
void nm_client_checkpoint_rollback (NMClient *client,
const char *checkpoint_path,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_12
GHashTable *nm_client_checkpoint_rollback_finish (NMClient *client,
GAsyncResult *result,
GError **error);
NM_AVAILABLE_IN_1_12
void nm_client_checkpoint_adjust_rollback_timeout (NMClient *client,
const char *checkpoint_path,
guint32 add_timeout,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_12
gboolean nm_client_checkpoint_adjust_rollback_timeout_finish (NMClient *client,
GAsyncResult *result,
GError **error);
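/* A rough sketch of the checkpoint flow (all names are illustrative;
 * rollback_cb would be defined analogously and call
 * nm_client_checkpoint_rollback_finish()). The path returned by
 * nm_checkpoint_get_path() identifies the checkpoint in the later calls:
 *
 *   static void
 *   checkpoint_created_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *       NMCheckpoint *checkpoint;
 *
 *       checkpoint = nm_client_checkpoint_create_finish (NM_CLIENT (source), result, &error);
 *       if (!checkpoint) {
 *           g_warning ("checkpoint creation failed: %s", error->message);
 *           g_error_free (error);
 *           return;
 *       }
 *       nm_client_checkpoint_rollback (NM_CLIENT (source),
 *                                      nm_checkpoint_get_path (checkpoint),
 *                                      NULL, rollback_cb, NULL);
 *       g_object_unref (checkpoint);
 *   }
 *
 *   create a checkpoint (here assumed over all devices, by passing NULL)
 *   with a 60 second rollback timeout:
 *
 *   nm_client_checkpoint_create (client, NULL, 60, NM_CHECKPOINT_CREATE_FLAG_NONE,
 *                                NULL, checkpoint_created_cb, NULL);
 */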
NM_AVAILABLE_IN_1_22
void nm_client_reload (NMClient *client,
NMManagerReloadFlags flags,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_22
gboolean nm_client_reload_finish (NMClient *client,
GAsyncResult *result,
GError **error);
/*****************************************************************************/
NM_AVAILABLE_IN_1_24
void nm_client_dbus_call (NMClient *client,
const char *object_path,
const char *interface_name,
const char *method_name,
GVariant *parameters,
const GVariantType *reply_type,
int timeout_msec,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_24
GVariant *nm_client_dbus_call_finish (NMClient *client,
GAsyncResult *result,
GError **error);
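/* nm_client_dbus_call() issues an asynchronous D-Bus call on NMClient's
 * connection, comparable to using g_dbus_connection_call() directly. A
 * hedged sketch, calling the manager's GetPermissions() method (the reply
 * type and callback name are assumptions for illustration):
 *
 *   static void
 *   called_cb (GObject *source, GAsyncResult *result, gpointer user_data)
 *   {
 *       GError *error = NULL;
 *       GVariant *ret;
 *
 *       ret = nm_client_dbus_call_finish (NM_CLIENT (source), result, &error);
 *       if (!ret) {
 *           g_warning ("call failed: %s", error->message);
 *           g_error_free (error);
 *       } else
 *           g_variant_unref (ret);
 *   }
 *
 *   nm_client_dbus_call (client, NM_DBUS_PATH, NM_DBUS_INTERFACE, "GetPermissions",
 *                        NULL, G_VARIANT_TYPE ("(a{ss})"), -1, NULL, called_cb, NULL);
 */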
NM_AVAILABLE_IN_1_24
void nm_client_dbus_set_property (NMClient *client,
const char *object_path,
const char *interface_name,
const char *property_name,
GVariant *value,
int timeout_msec,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data);
NM_AVAILABLE_IN_1_24
gboolean nm_client_dbus_set_property_finish (NMClient *client,
GAsyncResult *result,
GError **error);
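/* Similarly, a minimal sketch for nm_client_dbus_set_property(), here
 * assumed to toggle the manager's "WirelessEnabled" property (set_cb is a
 * hypothetical callback that would call
 * nm_client_dbus_set_property_finish()):
 *
 *   nm_client_dbus_set_property (client, NM_DBUS_PATH, NM_DBUS_INTERFACE,
 *                                "WirelessEnabled", g_variant_new_boolean (FALSE),
 *                                -1, NULL, set_cb, NULL);
 */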
G_END_DECLS
#endif /* __NM_CLIENT_H__ */