/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */
/* nm-linux-platform.c - Linux kernel & udev network configuration layer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Copyright (C) 2012 - 2015 Red Hat, Inc.
 */
#include "nm-default.h"

#include "nm-linux-platform.h"

/* system and kernel headers */
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <dlfcn.h>
#include <arpa/inet.h>
#include <netinet/icmp6.h>
#include <netinet/in.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/if_tun.h>
#include <linux/if_tunnel.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <gudev/gudev.h>

/* project headers */
#include "nm-utils.h"
#include "nm-core-internal.h"
#include "nm-setting-vlan.h"
#include "nm-core-utils.h"
#include "nmp-object.h"
#include "nmp-netns.h"
#include "nm-platform-utils.h"
#include "wifi/wifi-utils.h"
#include "wifi/wifi-utils-wext.h"
#include "nm-utils/unaligned.h"
/* VLAN flag for Multiple VLAN Registration Protocol; value matches the
 * kernel's VLAN_FLAG_MVRP (linux/if_vlan.h), defined here for older headers. */
#define VLAN_FLAG_MVRP 0x8

/* nm-internal error codes for libnl. Make sure they don't overlap. */
#define _NLE_NM_NOBUFS 500
#define _NLE_MSG_TRUNC 501

/*****************************************************************************/

/* size of the buffer for an interface's qdisc name. */
#define IFQDISCSIZ 32
/* Compatibility definitions for netlink attribute ids that may be missing
 * from older kernel headers. Values match the upstream linux/if_link.h
 * and linux/if_addr.h definitions. */
#ifndef IFLA_PROMISCUITY
#define IFLA_PROMISCUITY 30
#endif
#define IFLA_NUM_TX_QUEUES 31
#define IFLA_NUM_RX_QUEUES 32
#define IFLA_CARRIER 33
#define IFLA_PHYS_PORT_ID 34
#define IFLA_LINK_NETNSID 37
#define __IFLA_MAX 39

#define IFLA_INET6_TOKEN 7
#define IFLA_INET6_ADDR_GEN_MODE 8
#define __IFLA_INET6_MAX 9

#define IFLA_VLAN_PROTOCOL 5
#define __IFLA_VLAN_MAX 6

#define IFA_FLAGS 8
#define __IFA_MAX 9

#define IFLA_MACVLAN_FLAGS 2
#define __IFLA_MACVLAN_MAX 3

#define IFLA_IPTUN_LINK 1
#define IFLA_IPTUN_LOCAL 2
#define IFLA_IPTUN_REMOTE 3
#define IFLA_IPTUN_TTL 4
#define IFLA_IPTUN_TOS 5
#define IFLA_IPTUN_ENCAP_LIMIT 6
#define IFLA_IPTUN_FLOWINFO 7
#define IFLA_IPTUN_FLAGS 8
#define IFLA_IPTUN_PROTO 9
#define IFLA_IPTUN_PMTUDISC 10
#define __IFLA_IPTUN_MAX 19
#ifndef IFLA_IPTUN_MAX
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
#endif

#ifndef MACVLAN_FLAG_NOPROMISC
#define MACVLAN_FLAG_NOPROMISC 1
#endif

/* IPv6 flow-info word layout: traffic class and flow label (RFC 2460). */
#define IP6_FLOWINFO_TCLASS_MASK 0x0FF00000
#define IP6_FLOWINFO_TCLASS_SHIFT 20
#define IP6_FLOWINFO_FLOWLABEL_MASK 0x000FFFFF

/*****************************************************************************/

/* IFLA_MACSEC_* attributes; defined locally because MACsec support is newer
 * than many distro kernel headers. */
#define IFLA_MACSEC_UNSPEC 0
#define IFLA_MACSEC_SCI 1
#define IFLA_MACSEC_PORT 2
#define IFLA_MACSEC_ICV_LEN 3
#define IFLA_MACSEC_CIPHER_SUITE 4
#define IFLA_MACSEC_WINDOW 5
#define IFLA_MACSEC_ENCODING_SA 6
#define IFLA_MACSEC_ENCRYPT 7
#define IFLA_MACSEC_PROTECT 8
#define IFLA_MACSEC_INC_SCI 9
#define IFLA_MACSEC_ES 10
#define IFLA_MACSEC_SCB 11
#define IFLA_MACSEC_REPLAY_PROTECT 12
#define IFLA_MACSEC_VALIDATION 13
#define IFLA_MACSEC_PAD 14
#define __IFLA_MACSEC_MAX 15

/*****************************************************************************/
/* Logging helpers. _NMLOG/_NMLOG2 log with/without a platform instance;
 * the *_err variants additionally append strerror(errsv) to the message. */
#define _NMLOG_PREFIX_NAME                "platform-linux"
#define _NMLOG_DOMAIN                     LOGD_PLATFORM
#define _NMLOG2_DOMAIN                    LOGD_PLATFORM
#define _NMLOG(level, ...)                _LOG     (       level, _NMLOG_DOMAIN,  platform, __VA_ARGS__)
#define _NMLOG_err(errsv, level, ...)     _LOG_err (errsv, level, _NMLOG_DOMAIN,  platform, __VA_ARGS__)
#define _NMLOG2(level, ...)               _LOG     (       level, _NMLOG2_DOMAIN, NULL,     __VA_ARGS__)
#define _NMLOG2_err(errsv, level, ...)    _LOG_err (errsv, level, _NMLOG2_DOMAIN, NULL,     __VA_ARGS__)

/* Emit one log line, prefixing it with "platform-linux" and -- when @self is
 * not the singleton platform instance -- with the instance pointer too. */
#define _LOG_print(__level, __domain, __errsv, self, ...) \
    G_STMT_START { \
        char __prefix[32]; \
        const char *__p_prefix = _NMLOG_PREFIX_NAME; \
        const void *const __self = (self); \
        \
        if (__self && __self != nm_platform_try_get ()) { \
            g_snprintf (__prefix, sizeof (__prefix), "%s[%p]", _NMLOG_PREFIX_NAME, __self); \
            __p_prefix = __prefix; \
        } \
        _nm_log (__level, __domain, __errsv, \
                 "%s: " _NM_UTILS_MACRO_FIRST (__VA_ARGS__), \
                 __p_prefix _NM_UTILS_MACRO_REST (__VA_ARGS__)); \
    } G_STMT_END

#define _LOG(level, domain, self, ...) \
    G_STMT_START { \
        const NMLogLevel __level = (level); \
        const NMLogDomain __domain = (domain); \
        \
        if (nm_logging_enabled (__level, __domain)) { \
            _LOG_print (__level, __domain, 0, self, __VA_ARGS__); \
        } \
    } G_STMT_END

#define _LOG_err(errsv, level, domain, self, ...) \
    G_STMT_START { \
        const NMLogLevel __level = (level); \
        const NMLogDomain __domain = (domain); \
        \
        if (nm_logging_enabled (__level, __domain)) { \
            int __errsv = (errsv); \
            \
            /* The %m format specifier (GNU extension) would already allow you to specify the error
             * message conveniently (and nm_log would get that right too). But we don't want to depend
             * on that, so instead append the message at the end.
             * Currently users are expected not to use %m in the format string. */ \
            _LOG_print (__level, __domain, __errsv, self, \
                        _NM_UTILS_MACRO_FIRST (__VA_ARGS__) ": %s (%d)" \
                        _NM_UTILS_MACRO_REST (__VA_ARGS__), \
                        g_strerror (__errsv), __errsv); \
        } \
    } G_STMT_END

#define LOG_FMT_IP_TUNNEL "adding %s '%s' parent %u local %s remote %s"
/******************************************************************
 * Forward declarations and enums
 ******************************************************************/
/* Action to perform on an InfiniBand parent device's child interface. */
typedef enum {
    INFINIBAND_ACTION_CREATE_CHILD,
    INFINIBAND_ACTION_DELETE_CHILD,
} InfinibandAction;

/* Indexes for the refresh-all delayed actions; also the bit positions of the
 * corresponding DELAYED_ACTION_TYPE_REFRESH_ALL_* flags below. */
enum {
    DELAYED_ACTION_IDX_REFRESH_ALL_LINKS,
    DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ADDRESSES,
    DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ADDRESSES,
    DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ROUTES,
    DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ROUTES,
    _DELAYED_ACTION_IDX_REFRESH_ALL_NUM,
};

/* Flags identifying the pending delayed actions. Multiple schedulings of the
 * same action are coalesced before the action runs. */
typedef enum {
    DELAYED_ACTION_TYPE_NONE                        = 0,
    DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS           = (1LL << DELAYED_ACTION_IDX_REFRESH_ALL_LINKS),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES   = (1LL << DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ADDRESSES),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES   = (1LL << DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ADDRESSES),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES      = (1LL << DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ROUTES),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES      = (1LL << DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ROUTES),
    DELAYED_ACTION_TYPE_REFRESH_LINK                = (1LL << 5),
    DELAYED_ACTION_TYPE_MASTER_CONNECTED            = (1LL << 6),
    DELAYED_ACTION_TYPE_READ_NETLINK                = (1LL << 7),
    DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE        = (1LL << 8),
    __DELAYED_ACTION_TYPE_MAX,

    /* all refresh-all flags combined */
    DELAYED_ACTION_TYPE_REFRESH_ALL                 = DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,

    DELAYED_ACTION_TYPE_MAX                         = __DELAYED_ACTION_TYPE_MAX - 1,
} DelayedActionType;

/* Iterate over every single flag bit set in @flags_all. */
#define FOR_EACH_DELAYED_ACTION(iflags, flags_all) \
    for ((iflags) = (DelayedActionType) 0x1LL; (iflags) <= DELAYED_ACTION_TYPE_MAX; (iflags) <<= 1) \
        if (NM_FLAGS_HAS (flags_all, iflags))
/* Outcome of waiting for a netlink response to a request we sent. */
typedef enum {
    /* Negative values are errors from kernel. Add dummy member to
     * make enum signed. */
    _WAIT_FOR_NL_RESPONSE_RESULT_SYSTEM_ERROR = -1,

    WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN = 0,
    WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK,
    WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN,
    WAIT_FOR_NL_RESPONSE_RESULT_FAILED_RESYNC,
    WAIT_FOR_NL_RESPONSE_RESULT_FAILED_POLL,
    WAIT_FOR_NL_RESPONSE_RESULT_FAILED_TIMEOUT,
    WAIT_FOR_NL_RESPONSE_RESULT_FAILED_DISPOSING,
} WaitForNlResponseResult;
/* Invoked when the response for netlink request @seq_number arrives (or the
 * wait fails); @seq_result tells the outcome. */
typedef void (*WaitForNlResponseCallback) (NMPlatform *platform,
                                           guint32 seq_number,
                                           WaitForNlResponseResult seq_result,
                                           gpointer user_data);
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static void delayed_action_schedule ( NMPlatform * platform , DelayedActionType action_type , gpointer user_data ) ;
2015-06-19 15:38:41 +02:00
static gboolean delayed_action_handle_all ( NMPlatform * platform , gboolean read_netlink ) ;
2015-12-14 14:47:41 +01:00
static void do_request_link_no_delayed_actions ( NMPlatform * platform , int ifindex , const char * name ) ;
static void do_request_all_no_delayed_actions ( NMPlatform * platform , DelayedActionType action_type ) ;
2015-04-06 18:29:36 +02:00
static void cache_pre_hook ( NMPCache * cache , const NMPObject * old , const NMPObject * new , NMPCacheOpsType ops_type , gpointer user_data ) ;
2015-12-14 14:47:41 +01:00
static void cache_prune_candidates_prune ( NMPlatform * platform ) ;
2015-12-15 10:40:41 +01:00
static gboolean event_handler_read_netlink ( NMPlatform * platform , gboolean wait_for_acks ) ;
2016-12-09 10:11:29 +01:00
static void ASSERT_NETNS_CURRENT ( NMPlatform * platform ) ;
2015-04-23 23:16:00 +02:00
2015-12-14 14:47:41 +01:00
/*****************************************************************************/

/* Render @seq_result into @buf for logging; returns @buf. Negative values
 * are kernel errno codes and are printed with their strerror() text. */
static const char *
wait_for_nl_response_to_string (WaitForNlResponseResult seq_result, char *buf, gsize buf_size)
{
    char *buf0 = buf;

    switch (seq_result) {
    case WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN:
        nm_utils_strbuf_append_str (&buf, &buf_size, "unknown");
        break;
    case WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK:
        nm_utils_strbuf_append_str (&buf, &buf_size, "success");
        break;
    case WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN:
        nm_utils_strbuf_append_str (&buf, &buf_size, "failure");
        break;
    default:
        if (seq_result < 0)
            nm_utils_strbuf_append (&buf, &buf_size, "failure %d (%s)", -((int) seq_result), g_strerror (-((int) seq_result)));
        else
            nm_utils_strbuf_append (&buf, &buf_size, "internal failure %d", (int) seq_result);
        break;
    }
    return buf0;
}
/******************************************************************
* Support IFLA_INET6_ADDR_GEN_MODE
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static int _support_user_ipv6ll = 0 ;
# define _support_user_ipv6ll_still_undecided() (G_UNLIKELY (_support_user_ipv6ll == 0))
static gboolean
_support_user_ipv6ll_get ( void )
{
if ( _support_user_ipv6ll_still_undecided ( ) ) {
_support_user_ipv6ll = - 1 ;
_LOG2W ( " kernel support for IFLA_INET6_ADDR_GEN_MODE %s " , " failed to detect; assume no support " ) ;
2015-10-20 09:27:16 +02:00
return FALSE ;
}
return _support_user_ipv6ll > 0 ;
2015-10-13 17:53:23 +02:00
}
static void
2015-10-12 16:07:01 +02:00
_support_user_ipv6ll_detect ( struct nlattr * * tb )
2015-10-13 17:53:23 +02:00
{
if ( _support_user_ipv6ll_still_undecided ( ) ) {
2015-10-12 16:07:01 +02:00
if ( tb [ IFLA_INET6_ADDR_GEN_MODE ] ) {
2015-10-13 17:53:23 +02:00
_support_user_ipv6ll = 1 ;
_LOG2D ( " kernel support for IFLA_INET6_ADDR_GEN_MODE %s " , " detected " ) ;
} else {
_support_user_ipv6ll = - 1 ;
_LOG2D ( " kernel support for IFLA_INET6_ADDR_GEN_MODE %s " , " not detected " ) ;
}
}
}
/******************************************************************
 * Various utilities
 ******************************************************************/

/* Compare two NMVlanQosMapping entries by their "from" field (GCompareDataFunc). */
static int
_vlan_qos_mapping_cmp_from (gconstpointer a, gconstpointer b, gpointer user_data)
{
    const NMVlanQosMapping *map_a = a;
    const NMVlanQosMapping *map_b = b;

    if (map_a->from != map_b->from)
        return map_a->from < map_b->from ? -1 : 1;
    return 0;
}

/* Same as _vlan_qos_mapping_cmp_from(), but @a/@b are pointers to pointers
 * (as when sorting an array of NMVlanQosMapping*). */
static int
_vlan_qos_mapping_cmp_from_ptr (gconstpointer a, gconstpointer b, gpointer user_data)
{
    return _vlan_qos_mapping_cmp_from (*((const NMVlanQosMapping **) a),
                                       *((const NMVlanQosMapping **) b),
                                       NULL);
}
/******************************************************************
* NMLinkType functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
typedef struct {
const NMLinkType nm_type ;
const char * type_string ;
/* IFLA_INFO_KIND / rtnl_link_get_type() where applicable; the rtnl type
* should only be specified if the device type can be created without
* additional parameters , and if the device type can be determined from
* the rtnl_type . eg , tun / tap should not be specified since both
* tun and tap devices use " tun " , and InfiniBand should not be
* specified because a PKey is required at creation . Drivers set this
* value from their ' struct rtnl_link_ops ' structure .
*/
const char * rtnl_type ;
/* uevent DEVTYPE where applicable, from /sys/class/net/<ifname>/uevent;
* drivers set this value from their SET_NETDEV_DEV ( ) call and the
* ' struct device_type ' name member .
*/
const char * devtype ;
} LinkDesc ;
static const LinkDesc linktypes [ ] = {
{ NM_LINK_TYPE_NONE , " none " , NULL , NULL } ,
{ NM_LINK_TYPE_UNKNOWN , " unknown " , NULL , NULL } ,
{ NM_LINK_TYPE_ETHERNET , " ethernet " , NULL , NULL } ,
{ NM_LINK_TYPE_INFINIBAND , " infiniband " , NULL , NULL } ,
{ NM_LINK_TYPE_OLPC_MESH , " olpc-mesh " , NULL , NULL } ,
{ NM_LINK_TYPE_WIFI , " wifi " , NULL , " wlan " } ,
2016-06-14 11:19:15 -05:00
{ NM_LINK_TYPE_WWAN_NET , " wwan " , NULL , " wwan " } ,
2015-10-13 17:53:23 +02:00
{ NM_LINK_TYPE_WIMAX , " wimax " , " wimax " , " wimax " } ,
{ NM_LINK_TYPE_DUMMY , " dummy " , " dummy " , NULL } ,
{ NM_LINK_TYPE_GRE , " gre " , " gre " , NULL } ,
{ NM_LINK_TYPE_GRETAP , " gretap " , " gretap " , NULL } ,
{ NM_LINK_TYPE_IFB , " ifb " , " ifb " , NULL } ,
2015-11-27 22:22:25 +01:00
{ NM_LINK_TYPE_IP6TNL , " ip6tnl " , " ip6tnl " , NULL } ,
2015-11-27 14:01:56 +01:00
{ NM_LINK_TYPE_IPIP , " ipip " , " ipip " , NULL } ,
2015-10-13 17:53:23 +02:00
{ NM_LINK_TYPE_LOOPBACK , " loopback " , NULL , NULL } ,
2016-06-30 18:20:09 +02:00
{ NM_LINK_TYPE_MACSEC , " macsec " , " macsec " , NULL } ,
2015-10-13 17:53:23 +02:00
{ NM_LINK_TYPE_MACVLAN , " macvlan " , " macvlan " , NULL } ,
{ NM_LINK_TYPE_MACVTAP , " macvtap " , " macvtap " , NULL } ,
{ NM_LINK_TYPE_OPENVSWITCH , " openvswitch " , " openvswitch " , NULL } ,
2015-11-11 18:41:48 +01:00
{ NM_LINK_TYPE_SIT , " sit " , " sit " , NULL } ,
2015-10-13 17:53:23 +02:00
{ NM_LINK_TYPE_TAP , " tap " , NULL , NULL } ,
{ NM_LINK_TYPE_TUN , " tun " , NULL , NULL } ,
{ NM_LINK_TYPE_VETH , " veth " , " veth " , NULL } ,
{ NM_LINK_TYPE_VLAN , " vlan " , " vlan " , " vlan " } ,
{ NM_LINK_TYPE_VXLAN , " vxlan " , " vxlan " , " vxlan " } ,
{ NM_LINK_TYPE_BNEP , " bluetooth " , NULL , " bluetooth " } ,
{ NM_LINK_TYPE_BRIDGE , " bridge " , " bridge " , " bridge " } ,
{ NM_LINK_TYPE_BOND , " bond " , " bond " , " bond " } ,
{ NM_LINK_TYPE_TEAM , " team " , " team " , NULL } ,
} ;
/* Map an NMLinkType to its rtnetlink IFLA_INFO_KIND string (may be NULL for
 * types that cannot be created via plain rtnl). Asserts on unknown @type. */
static const char *
nm_link_type_to_rtnl_type_string (NMLinkType type)
{
    int i;

    for (i = 0; i < G_N_ELEMENTS (linktypes); i++) {
        if (type == linktypes[i].nm_type)
            return linktypes[i].rtnl_type;
    }
    g_return_val_if_reached (NULL);
}

/* Map an NMLinkType to its human-readable name. Asserts on unknown @type. */
const char *
nm_link_type_to_string (NMLinkType type)
{
    int i;

    for (i = 0; i < G_N_ELEMENTS (linktypes); i++) {
        if (type == linktypes[i].nm_type)
            return linktypes[i].type_string;
    }
    g_return_val_if_reached (NULL);
}
/******************************************************************
 * Utilities
 ******************************************************************/

/* _timestamp_nl_to_ms:
 * @timestamp_nl: a timestamp from ifa_cacheinfo.
 * @monotonic_ms: *now* in CLOCK_MONOTONIC. Needed to estimate the current
 *   uptime and how often timestamp_nl wrapped.
 *
 * Convert the timestamp from ifa_cacheinfo to CLOCK_MONOTONIC milliseconds.
 * The ifa_cacheinfo fields tstamp and cstamp contain timestamps that count
 * in 1/100th of a second of clock_gettime(CLOCK_MONOTONIC). However,
 * the uint32 counter wraps every 497 days of uptime, so we have to compensate
 * for that. */
static gint64
_timestamp_nl_to_ms (guint32 timestamp_nl, gint64 monotonic_ms)
{
    const gint64 WRAP_INTERVAL = (((gint64) G_MAXUINT32) + 1) * (1000 / 100);
    gint64 timestamp_nl_ms;

    /* convert timestamp from 1/100th of a second to msec. */
    timestamp_nl_ms = ((gint64) timestamp_nl) * (1000 / 100);

    /* timestamp wraps every 497 days. Try to compensate for that.*/
    if (timestamp_nl_ms > monotonic_ms) {
        /* timestamp_nl_ms is in the future. Truncate it to *now* */
        timestamp_nl_ms = monotonic_ms;
    } else if (monotonic_ms >= WRAP_INTERVAL) {
        /* shift the timestamp into the current wrap interval; if that
         * overshoots *now*, it belongs to the previous interval. */
        timestamp_nl_ms += (monotonic_ms / WRAP_INTERVAL) * WRAP_INTERVAL;
        if (timestamp_nl_ms > monotonic_ms)
            timestamp_nl_ms -= WRAP_INTERVAL;
    }

    return timestamp_nl_ms;
}
static guint32
2015-10-12 16:07:01 +02:00
_addrtime_timestamp_to_nm ( guint32 timestamp , gint32 * out_now_nm )
2015-10-13 17:53:23 +02:00
{
struct timespec tp ;
gint64 now_nl , now_nm , result ;
int err ;
/* timestamp is unset. Default to 1. */
2015-10-12 16:07:01 +02:00
if ( ! timestamp ) {
2015-10-13 17:53:23 +02:00
if ( out_now_nm )
* out_now_nm = 0 ;
return 1 ;
}
/* do all the calculations in milliseconds scale */
err = clock_gettime ( CLOCK_MONOTONIC , & tp ) ;
g_assert ( err = = 0 ) ;
now_nm = nm_utils_get_monotonic_timestamp_ms ( ) ;
now_nl = ( ( ( gint64 ) tp . tv_sec ) * ( ( gint64 ) 1000 ) ) +
( tp . tv_nsec / ( NM_UTILS_NS_PER_SECOND / 1000 ) ) ;
2015-10-12 16:07:01 +02:00
result = now_nm - ( now_nl - _timestamp_nl_to_ms ( timestamp , now_nl ) ) ;
2015-10-13 17:53:23 +02:00
if ( out_now_nm )
* out_now_nm = now_nm / 1000 ;
2015-10-12 16:07:01 +02:00
/* converting the timestamp into nm_utils_get_monotonic_timestamp_ms() scale is
2015-10-13 17:53:23 +02:00
* a good guess but fails in the following situations :
*
* - If the address existed before start of the process , the timestamp in nm scale would
* be negative or zero . In this case we default to 1.
2015-10-12 16:07:01 +02:00
* - during hibernation , the CLOCK_MONOTONIC / timestamp drifts from
2015-10-13 17:53:23 +02:00
* nm_utils_get_monotonic_timestamp_ms ( ) scale .
*/
if ( result < = 1000 )
return 1 ;
if ( result > now_nm )
return now_nm / 1000 ;
return result / 1000 ;
}
static guint32
2015-10-12 16:07:01 +02:00
_addrtime_extend_lifetime ( guint32 lifetime , guint32 seconds )
2015-10-13 17:53:23 +02:00
{
guint64 v ;
if ( lifetime = = NM_PLATFORM_LIFETIME_PERMANENT
| | seconds = = 0 )
return lifetime ;
v = ( guint64 ) lifetime + ( guint64 ) seconds ;
return MIN ( v , NM_PLATFORM_LIFETIME_PERMANENT - 1 ) ;
}
/* The rtnl_addr object contains relative lifetimes @valid and @preferred
* that count in seconds , starting from the moment when the kernel constructed
* the netlink message .
*
* There is also a field rtnl_addr_last_update_time ( ) , which is the absolute
* time in 1 / 100 th of a second of clock_gettime ( CLOCK_MONOTONIC ) when the address
* was modified ( wrapping every 497 days ) .
* Immediately at the time when the address was last modified , # NOW and @ last_update_time
* are the same , so ( only ) in that case @ valid and @ preferred are anchored at @ last_update_time .
* However , this is not true in general . As time goes by , whenever kernel sends a new address
* via netlink , the lifetimes keep counting down .
* */
static void
2015-10-12 16:07:01 +02:00
_addrtime_get_lifetimes ( guint32 timestamp ,
guint32 lifetime ,
guint32 preferred ,
guint32 * out_timestamp ,
guint32 * out_lifetime ,
guint32 * out_preferred )
2015-10-13 17:53:23 +02:00
{
gint32 now ;
if ( lifetime ! = NM_PLATFORM_LIFETIME_PERMANENT
| | preferred ! = NM_PLATFORM_LIFETIME_PERMANENT ) {
if ( preferred > lifetime )
preferred = lifetime ;
2015-10-12 16:07:01 +02:00
timestamp = _addrtime_timestamp_to_nm ( timestamp , & now ) ;
2015-10-13 17:53:23 +02:00
if ( now = = 0 ) {
/* strange. failed to detect the last-update time and assumed that timestamp is 1. */
nm_assert ( timestamp = = 1 ) ;
now = nm_utils_get_monotonic_timestamp_s ( ) ;
}
if ( timestamp < now ) {
guint32 diff = now - timestamp ;
2015-10-12 16:07:01 +02:00
lifetime = _addrtime_extend_lifetime ( lifetime , diff ) ;
preferred = _addrtime_extend_lifetime ( preferred , diff ) ;
2015-10-13 17:53:23 +02:00
} else
nm_assert ( timestamp = = now ) ;
2015-10-12 16:07:01 +02:00
} else
timestamp = 0 ;
2015-10-13 17:53:23 +02:00
* out_timestamp = timestamp ;
* out_lifetime = lifetime ;
* out_preferred = preferred ;
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-10-12 16:07:01 +02:00
static const NMPObject *
_lookup_cached_link ( const NMPCache * cache , int ifindex , gboolean * completed_from_cache , const NMPObject * * link_cached )
{
const NMPObject * obj ;
nm_assert ( completed_from_cache & & link_cached ) ;
if ( ! * completed_from_cache ) {
obj = ifindex > 0 & & cache ? nmp_cache_lookup_link ( cache , ifindex ) : NULL ;
2016-02-15 17:17:07 +01:00
if ( obj & & obj - > _link . netlink . is_in_netlink )
2015-10-12 16:07:01 +02:00
* link_cached = obj ;
else
* link_cached = NULL ;
* completed_from_cache = TRUE ;
}
return * link_cached ;
}
/*****************************************************************************/

#define DEVTYPE_PREFIX "DEVTYPE="

/* Read the DEVTYPE= value from the "uevent" file in the sysfs directory
 * @dirfd. Returns a newly allocated string (caller frees with g_free()),
 * or NULL if the file cannot be read or contains no DEVTYPE= line. */
static char *
_linktype_read_devtype (int dirfd)
{
    char *contents = NULL;
    char *cont, *end;

    nm_assert (dirfd >= 0);

    if (nm_utils_file_get_contents (dirfd, "uevent", 1 * 1024 * 1024, &contents, NULL, NULL) < 0)
        return NULL;
    for (cont = contents; cont; cont = end) {
        end = strpbrk (cont, "\r\n");
        if (end)
            *end++ = '\0';
        if (strncmp (cont, DEVTYPE_PREFIX, NM_STRLEN (DEVTYPE_PREFIX)) == 0) {
            cont += NM_STRLEN (DEVTYPE_PREFIX);
            /* reuse the buffer: move the value to the front and return it. */
            memmove (contents, cont, strlen (cont) + 1);
            return contents;
        }
    }
    g_free (contents);
    return NULL;
}
/* Determine the NMLinkType of a link from its netlink "kind", ARP hardware
 * type, link flags and (as a last resort) sysfs/ethtool probing.
 *
 * @platform: the platform instance (may be NULL, then TUN/TAP property lookup
 *   is skipped and only the flag-based guess is used).
 * @cache: platform object cache used to reuse a previous type decision.
 * @kind: the rtnl link kind (IFLA_INFO_KIND), or NULL.
 * @ifindex/@ifname: identify the link; @ifname must not be NULL.
 * @flags: the link's IFF_* flags.
 * @arptype: the ARPHRD_* hardware type.
 * @completed_from_cache/@link_cached: memoization state for the cache lookup
 *   (see _lookup_cached_link()); may be NULL to skip the cache entirely.
 * @out_kind: always set on return, to the interned kind string (possibly the
 *   cached object's kind).
 *
 * Returns: the detected NMLinkType, or NM_LINK_TYPE_UNKNOWN. */
static NMLinkType
_linktype_get_type (NMPlatform *platform,
                    const NMPCache *cache,
                    const char *kind,
                    int ifindex,
                    const char *ifname,
                    unsigned flags,
                    unsigned arptype,
                    gboolean *completed_from_cache,
                    const NMPObject **link_cached,
                    const char **out_kind)
{
	guint i;

	ASSERT_NETNS_CURRENT (platform);
	nm_assert (ifname);

	if (completed_from_cache) {
		const NMPObject *obj;

		obj = _lookup_cached_link (cache, ifindex, completed_from_cache, link_cached);

		/* If we detected the link type before, we stick to that
		 * decision unless the "kind" nor "name" changed. If "name" changed,
		 * it means that their type may not have been determined correctly
		 * due to race conditions while accessing sysfs.
		 *
		 * This way, we save additional ethtool/sysctl lookups, but moreover,
		 * we keep the linktype stable and don't change it as long as the link
		 * exists.
		 *
		 * Note that kernel *can* reuse the ifindex (on integer overflow, and
		 * when moving interface to other netns). Thus here there is a tiny potential
		 * of messing stuff up. */
		if (   obj
		    && !NM_IN_SET (obj->link.type, NM_LINK_TYPE_UNKNOWN, NM_LINK_TYPE_NONE)
		    && nm_streq (ifname, obj->link.name)
		    && (   !kind
		        || !g_strcmp0 (kind, obj->link.kind))) {
			nm_assert (obj->link.kind == g_intern_string (obj->link.kind));
			*out_kind = obj->link.kind;
			return obj->link.type;
		}
	}

	*out_kind = g_intern_string (kind);

	if (kind) {
		/* First, try a direct match of the rtnl kind against the static
		 * linktypes table. */
		for (i = 0; i < G_N_ELEMENTS (linktypes); i++) {
			if (g_strcmp0 (kind, linktypes[i].rtnl_type) == 0)
				return linktypes[i].nm_type;
		}

		if (!strcmp (kind, "tun")) {
			NMPlatformTunProperties props;

			/* "tun" covers both TUN and TAP; disambiguate via the device's
			 * reported mode when we can query it. */
			if (   platform
			    && nm_platform_link_tun_get_properties (platform, ifindex, &props)) {
				if (!g_strcmp0 (props.mode, "tap"))
					return NM_LINK_TYPE_TAP;
				if (!g_strcmp0 (props.mode, "tun"))
					return NM_LINK_TYPE_TUN;
			}

			/* try guessing the type using the link flags instead... */
			if (flags & IFF_POINTOPOINT)
				return NM_LINK_TYPE_TUN;
			return NM_LINK_TYPE_TAP;
		}
	}

	/* No (usable) kind: fall back to the ARP hardware type. */
	if (arptype == ARPHRD_LOOPBACK)
		return NM_LINK_TYPE_LOOPBACK;
	else if (arptype == ARPHRD_INFINIBAND)
		return NM_LINK_TYPE_INFINIBAND;
	else if (arptype == ARPHRD_SIT)
		return NM_LINK_TYPE_SIT;
	else if (arptype == ARPHRD_TUNNEL6)
		return NM_LINK_TYPE_IP6TNL;

	{
		NMPUtilsEthtoolDriverInfo driver_info;

		/* Fallback OVS detection for kernel <= 3.16 */
		if (nmp_utils_ethtool_get_driver_info (ifindex, &driver_info)) {
			if (nm_streq (driver_info.driver, "openvswitch"))
				return NM_LINK_TYPE_OPENVSWITCH;

			if (arptype == 256) {
				/* Some s390 CTC-type devices report 256 for the encapsulation type
				 * for some reason, but we need to call them Ethernet.
				 */
				if (nm_streq (driver_info.driver, "ctcm"))
					return NM_LINK_TYPE_ETHERNET;
			}
		}
	}

	{
		nm_auto_close int dirfd = -1;
		gs_free char *devtype = NULL;
		char ifname_verified[IFNAMSIZ];

		/* Probe sysfs: DEVTYPE from uevent, presence of marker files, and
		 * finally wifi detection. */
		dirfd = nmp_utils_sysctl_open_netdir (ifindex, ifname, ifname_verified);
		if (dirfd >= 0) {
			if (faccessat (dirfd, "anycast_mask", F_OK, 0) == 0)
				return NM_LINK_TYPE_OLPC_MESH;

			devtype = _linktype_read_devtype (dirfd);
			for (i = 0; devtype && i < G_N_ELEMENTS (linktypes); i++) {
				if (g_strcmp0 (devtype, linktypes[i].devtype) == 0) {
					if (linktypes[i].nm_type == NM_LINK_TYPE_BNEP) {
						/* Both BNEP and 6lowpan use DEVTYPE=bluetooth, so we must
						 * use arptype to distinguish between them.
						 */
						if (arptype != ARPHRD_ETHER)
							continue;
					}
					return linktypes[i].nm_type;
				}
			}

			/* Fallback for drivers that don't call SET_NETDEV_DEVTYPE() */
			if (wifi_utils_is_wifi (dirfd, ifname_verified))
				return NM_LINK_TYPE_WIFI;
		}

		if (arptype == ARPHRD_ETHER) {
			/* Misc non-upstream WWAN drivers. rmnet is Qualcomm's proprietary
			 * modem interface, ccmni is MediaTek's. FIXME: these drivers should
			 * really set devtype=WWAN.
			 */
			if (g_str_has_prefix (ifname, "rmnet") ||
			    g_str_has_prefix (ifname, "rev_rmnet") ||
			    g_str_has_prefix (ifname, "ccmni"))
				return NM_LINK_TYPE_WWAN_NET;

			/* Standard wired ethernet interfaces don't report an rtnl_link_type, so
			 * only allow fallback to Ethernet if no type is given. This should
			 * prevent future virtual network drivers from being treated as Ethernet
			 * when they should be Generic instead.
			 */
			if (!kind && !devtype)
				return NM_LINK_TYPE_ETHERNET;
			/* The USB gadget interfaces behave and look like ordinary ethernet devices
			 * aside from the DEVTYPE. */
			if (!g_strcmp0 (devtype, "gadget"))
				return NM_LINK_TYPE_ETHERNET;
		}
	}

	return NM_LINK_TYPE_UNKNOWN;
}
2014-10-22 18:19:54 +02:00
/******************************************************************
* libnl unility functions and wrappers
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2015-10-20 09:27:16 +02:00
/* Attribute for auto-freeing a "struct nl_msg *" variable when it goes
 * out of scope (GCC/clang cleanup attribute). */
#define nm_auto_nlmsg __attribute__((cleanup(_nm_auto_nl_msg_cleanup)))

/* Cleanup handler backing nm_auto_nlmsg: frees the pointed-to nl_msg.
 * A NULL message is fine (nlmsg_free() on NULL is a no-op in libnl). */
static void
_nm_auto_nl_msg_cleanup (void *ptr)
{
	struct nl_msg *msg = *((struct nl_msg **) ptr);

	nlmsg_free (msg);
}
2015-05-29 11:12:15 +02:00
/* Render an rtnetlink message type as a human-readable token into @buf.
 * Known RTM_* link/address/route types map to fixed names; anything else
 * is formatted as "(<number>)".
 *
 * Returns: @buf, for convenient inline use in log statements. */
static const char *
_nl_nlmsg_type_to_str (guint16 type, char *buf, gsize len)
{
	static const struct {
		guint16 type;
		const char *name;
	} known[] = {
		{ RTM_NEWLINK,  "NEWLINK"  },
		{ RTM_DELLINK,  "DELLINK"  },
		{ RTM_NEWADDR,  "NEWADDR"  },
		{ RTM_DELADDR,  "DELADDR"  },
		{ RTM_NEWROUTE, "NEWROUTE" },
		{ RTM_DELROUTE, "DELROUTE" },
	};
	guint i;

	for (i = 0; i < G_N_ELEMENTS (known); i++) {
		if (known[i].type == type) {
			g_strlcpy (buf, known[i].name, len);
			return buf;
		}
	}
	g_snprintf (buf, len, "(%d)", type);
	return buf;
}
2015-10-12 16:07:01 +02:00
/******************************************************************
* NMPObject / netlink functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2015-05-29 09:40:24 +02:00
2015-10-12 16:07:01 +02:00
# define _check_addr_or_errout(tb, attr, addr_len) \
( { \
const struct nlattr * __t = ( tb ) [ ( attr ) ] ; \
\
if ( __t ) { \
if ( nla_len ( __t ) ! = ( addr_len ) ) { \
goto errout ; \
} \
} \
! ! __t ; \
} )
2015-05-29 09:40:24 +02:00
2015-10-12 16:07:01 +02:00
/*****************************************************************************/
2015-05-29 09:40:24 +02:00
2015-10-12 16:07:01 +02:00
/* Copied and heavily modified from libnl3's inet6_parse_protinfo().
 *
 * Parse the AF_INET6 portion of an IFLA_AF_SPEC attribute.
 *
 * @out_token/@out_token_valid: receive the IPv6 interface token, only
 *   written when parsing succeeds AND the IFLA_INET6_TOKEN attribute was
 *   present (out_token_valid is then set to TRUE, otherwise untouched).
 * @out_addr_gen_mode_inv/@out_addr_gen_mode_valid: likewise for the
 *   (inverted, see below) address generation mode.
 *
 * Returns: TRUE on success, FALSE if the attribute is malformed. Note that
 * on failure none of the out-arguments are modified. */
static gboolean
_parse_af_inet6 (NMPlatform *platform,
                 struct nlattr *attr,
                 NMUtilsIPv6IfaceId *out_token,
                 gboolean *out_token_valid,
                 guint8 *out_addr_gen_mode_inv,
                 gboolean *out_addr_gen_mode_valid)
{
	static struct nla_policy policy[IFLA_INET6_MAX+1] = {
		[IFLA_INET6_FLAGS]         = { .type = NLA_U32 },
		[IFLA_INET6_CACHEINFO]     = { .minlen = nm_offsetofend (struct ifla_cacheinfo, retrans_time) },
		[IFLA_INET6_CONF]          = { .minlen = 4 },
		[IFLA_INET6_STATS]         = { .minlen = 8 },
		[IFLA_INET6_ICMP6STATS]    = { .minlen = 8 },
		[IFLA_INET6_TOKEN]         = { .minlen = sizeof (struct in6_addr) },
		[IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
	};
	struct nlattr *tb[IFLA_INET6_MAX+1];
	int err;
	struct in6_addr i6_token;
	gboolean token_valid = FALSE;
	gboolean addr_gen_mode_valid = FALSE;
	guint8 i6_addr_gen_mode_inv = 0;
	gboolean success = FALSE;

	err = nla_parse_nested (tb, IFLA_INET6_MAX, attr, policy);
	if (err < 0)
		goto errout;

	/* These attributes carry arrays of 32-bit (conf) resp. 64-bit (stats)
	 * values; reject payloads that are not a whole multiple. */
	if (tb[IFLA_INET6_CONF] && nla_len (tb[IFLA_INET6_CONF]) % 4)
		goto errout;
	if (tb[IFLA_INET6_STATS] && nla_len (tb[IFLA_INET6_STATS]) % 8)
		goto errout;
	if (tb[IFLA_INET6_ICMP6STATS] && nla_len (tb[IFLA_INET6_ICMP6STATS]) % 8)
		goto errout;

	/* may "goto errout" if the token attribute has the wrong size. */
	if (_check_addr_or_errout (tb, IFLA_INET6_TOKEN, sizeof (struct in6_addr))) {
		nla_memcpy (&i6_token, tb[IFLA_INET6_TOKEN], sizeof (struct in6_addr));
		token_valid = TRUE;
	}

	/* Hack to detect support addrgenmode of the kernel. We only parse
	 * netlink messages that we receive from kernel, hence this check
	 * is valid. */
	_support_user_ipv6ll_detect (tb);

	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
		i6_addr_gen_mode_inv = _nm_platform_uint8_inv (nla_get_u8 (tb[IFLA_INET6_ADDR_GEN_MODE]));
		if (i6_addr_gen_mode_inv == 0) {
			/* an inverse addrgenmode of zero is unexpected. We need to reserve zero
			 * to signal "unset". */
			goto errout;
		}
		addr_gen_mode_valid = TRUE;
	}

	success = TRUE;

	/* Only now, with parsing complete, publish the results. */
	if (token_valid) {
		*out_token_valid = token_valid;
		nm_utils_ipv6_interface_identifier_get_from_addr (out_token, &i6_token);
	}
	if (addr_gen_mode_valid) {
		*out_addr_gen_mode_valid = addr_gen_mode_valid;
		*out_addr_gen_mode_inv = i6_addr_gen_mode_inv;
	}

errout:
	return success;
}
2015-10-12 16:07:01 +02:00
/*****************************************************************************/
2015-10-12 15:15:21 +02:00
static NMPObject *
_parse_lnk_gre ( const char * kind , struct nlattr * info_data )
{
static struct nla_policy policy [ IFLA_GRE_MAX + 1 ] = {
[ IFLA_GRE_LINK ] = { . type = NLA_U32 } ,
[ IFLA_GRE_IFLAGS ] = { . type = NLA_U16 } ,
[ IFLA_GRE_OFLAGS ] = { . type = NLA_U16 } ,
[ IFLA_GRE_IKEY ] = { . type = NLA_U32 } ,
[ IFLA_GRE_OKEY ] = { . type = NLA_U32 } ,
[ IFLA_GRE_LOCAL ] = { . type = NLA_U32 } ,
[ IFLA_GRE_REMOTE ] = { . type = NLA_U32 } ,
[ IFLA_GRE_TTL ] = { . type = NLA_U8 } ,
[ IFLA_GRE_TOS ] = { . type = NLA_U8 } ,
[ IFLA_GRE_PMTUDISC ] = { . type = NLA_U8 } ,
} ;
struct nlattr * tb [ IFLA_GRE_MAX + 1 ] ;
int err ;
NMPObject * obj ;
NMPlatformLnkGre * props ;
if ( ! info_data | | g_strcmp0 ( kind , " gre " ) )
return NULL ;
err = nla_parse_nested ( tb , IFLA_GRE_MAX , info_data , policy ) ;
if ( err < 0 )
return NULL ;
obj = nmp_object_new ( NMP_OBJECT_TYPE_LNK_GRE , NULL ) ;
props = & obj - > lnk_gre ;
props - > parent_ifindex = tb [ IFLA_GRE_LINK ] ? nla_get_u32 ( tb [ IFLA_GRE_LINK ] ) : 0 ;
2015-09-01 22:11:47 +02:00
props - > input_flags = tb [ IFLA_GRE_IFLAGS ] ? ntohs ( nla_get_u16 ( tb [ IFLA_GRE_IFLAGS ] ) ) : 0 ;
props - > output_flags = tb [ IFLA_GRE_OFLAGS ] ? ntohs ( nla_get_u16 ( tb [ IFLA_GRE_OFLAGS ] ) ) : 0 ;
props - > input_key = tb [ IFLA_GRE_IKEY ] ? ntohl ( nla_get_u32 ( tb [ IFLA_GRE_IKEY ] ) ) : 0 ;
props - > output_key = tb [ IFLA_GRE_OKEY ] ? ntohl ( nla_get_u32 ( tb [ IFLA_GRE_OKEY ] ) ) : 0 ;
2015-10-12 15:15:21 +02:00
props - > local = tb [ IFLA_GRE_LOCAL ] ? nla_get_u32 ( tb [ IFLA_GRE_LOCAL ] ) : 0 ;
props - > remote = tb [ IFLA_GRE_REMOTE ] ? nla_get_u32 ( tb [ IFLA_GRE_REMOTE ] ) : 0 ;
props - > tos = tb [ IFLA_GRE_TOS ] ? nla_get_u8 ( tb [ IFLA_GRE_TOS ] ) : 0 ;
props - > ttl = tb [ IFLA_GRE_TTL ] ? nla_get_u8 ( tb [ IFLA_GRE_TTL ] ) : 0 ;
props - > path_mtu_discovery = ! tb [ IFLA_GRE_PMTUDISC ] | | ! ! nla_get_u8 ( tb [ IFLA_GRE_PMTUDISC ] ) ;
return obj ;
}
/*****************************************************************************/
2015-10-15 15:47:14 +02:00
/* IFLA_IPOIB_* were introduced in the 3.7 kernel, but the kernel headers
* we ' re building against might not have those properties even though the
* running kernel might .
*/
# define IFLA_IPOIB_UNSPEC 0
# define IFLA_IPOIB_PKEY 1
# define IFLA_IPOIB_MODE 2
# define IFLA_IPOIB_UMCAST 3
# undef IFLA_IPOIB_MAX
# define IFLA_IPOIB_MAX IFLA_IPOIB_UMCAST
# define IPOIB_MODE_DATAGRAM 0 /* using unreliable datagram QPs */
# define IPOIB_MODE_CONNECTED 1 /* using connected QPs */
static NMPObject *
_parse_lnk_infiniband ( const char * kind , struct nlattr * info_data )
{
static struct nla_policy policy [ IFLA_IPOIB_MAX + 1 ] = {
[ IFLA_IPOIB_PKEY ] = { . type = NLA_U16 } ,
[ IFLA_IPOIB_MODE ] = { . type = NLA_U16 } ,
[ IFLA_IPOIB_UMCAST ] = { . type = NLA_U16 } ,
} ;
struct nlattr * tb [ IFLA_IPOIB_MAX + 1 ] ;
NMPlatformLnkInfiniband * info ;
NMPObject * obj ;
int err ;
const char * mode ;
if ( ! info_data | | g_strcmp0 ( kind , " ipoib " ) )
return NULL ;
err = nla_parse_nested ( tb , IFLA_IPOIB_MAX , info_data , policy ) ;
if ( err < 0 )
return NULL ;
if ( ! tb [ IFLA_IPOIB_PKEY ] | | ! tb [ IFLA_IPOIB_MODE ] )
return NULL ;
switch ( nla_get_u16 ( tb [ IFLA_IPOIB_MODE ] ) ) {
case IPOIB_MODE_DATAGRAM :
mode = " datagram " ;
break ;
case IPOIB_MODE_CONNECTED :
mode = " connected " ;
break ;
default :
return NULL ;
}
obj = nmp_object_new ( NMP_OBJECT_TYPE_LNK_INFINIBAND , NULL ) ;
info = & obj - > lnk_infiniband ;
info - > p_key = nla_get_u16 ( tb [ IFLA_IPOIB_PKEY ] ) ;
info - > mode = mode ;
return obj ;
}
/*****************************************************************************/
2015-11-27 22:22:25 +01:00
/* Build an NMP_OBJECT_TYPE_LNK_IP6TNL object from the IFLA_INFO_DATA of a
 * link of kind "ip6tnl".
 *
 * Returns: a new NMPObject (caller owns the reference), or NULL if @kind
 * does not match or the attribute cannot be parsed. Absent attributes
 * leave the corresponding fields at their initial value. */
static NMPObject *
_parse_lnk_ip6tnl (const char *kind, struct nlattr *info_data)
{
	static struct nla_policy policy[IFLA_IPTUN_MAX + 1] = {
		[IFLA_IPTUN_LINK]        = { .type = NLA_U32 },
		[IFLA_IPTUN_LOCAL]       = { .type = NLA_UNSPEC,
		                             .minlen = sizeof (struct in6_addr) },
		[IFLA_IPTUN_REMOTE]      = { .type = NLA_UNSPEC,
		                             .minlen = sizeof (struct in6_addr) },
		[IFLA_IPTUN_TTL]         = { .type = NLA_U8 },
		[IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
		[IFLA_IPTUN_FLOWINFO]    = { .type = NLA_U32 },
		[IFLA_IPTUN_PROTO]       = { .type = NLA_U8 },
	};
	struct nlattr *tb[IFLA_IPTUN_MAX + 1];
	int err;
	NMPObject *obj;
	NMPlatformLnkIp6Tnl *props;
	guint32 flowinfo;

	if (!info_data || g_strcmp0 (kind, "ip6tnl"))
		return NULL;

	err = nla_parse_nested (tb, IFLA_IPTUN_MAX, info_data, policy);
	if (err < 0)
		return NULL;

	obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_IP6TNL, NULL);
	props = &obj->lnk_ip6tnl;

	if (tb[IFLA_IPTUN_LINK])
		props->parent_ifindex = nla_get_u32 (tb[IFLA_IPTUN_LINK]);
	if (tb[IFLA_IPTUN_LOCAL])
		memcpy (&props->local, nla_data (tb[IFLA_IPTUN_LOCAL]), sizeof (props->local));
	if (tb[IFLA_IPTUN_REMOTE])
		memcpy (&props->remote, nla_data (tb[IFLA_IPTUN_REMOTE]), sizeof (props->remote));
	if (tb[IFLA_IPTUN_TTL])
		props->ttl = nla_get_u8 (tb[IFLA_IPTUN_TTL]);
	if (tb[IFLA_IPTUN_ENCAP_LIMIT])
		props->encap_limit = nla_get_u8 (tb[IFLA_IPTUN_ENCAP_LIMIT]);
	if (tb[IFLA_IPTUN_FLOWINFO]) {
		/* The flowinfo word packs both the flow label and the traffic
		 * class; split them apart with the IP6_FLOWINFO_* masks. */
		flowinfo = ntohl (nla_get_u32 (tb[IFLA_IPTUN_FLOWINFO]));
		props->flow_label = flowinfo & IP6_FLOWINFO_FLOWLABEL_MASK;
		props->tclass = (flowinfo & IP6_FLOWINFO_TCLASS_MASK) >> IP6_FLOWINFO_TCLASS_SHIFT;
	}
	if (tb[IFLA_IPTUN_PROTO])
		props->proto = nla_get_u8 (tb[IFLA_IPTUN_PROTO]);

	return obj;
}
/*****************************************************************************/
2015-11-27 14:01:56 +01:00
static NMPObject *
_parse_lnk_ipip ( const char * kind , struct nlattr * info_data )
{
static struct nla_policy policy [ IFLA_IPTUN_MAX + 1 ] = {
[ IFLA_IPTUN_LINK ] = { . type = NLA_U32 } ,
[ IFLA_IPTUN_LOCAL ] = { . type = NLA_U32 } ,
[ IFLA_IPTUN_REMOTE ] = { . type = NLA_U32 } ,
[ IFLA_IPTUN_TTL ] = { . type = NLA_U8 } ,
[ IFLA_IPTUN_TOS ] = { . type = NLA_U8 } ,
[ IFLA_IPTUN_PMTUDISC ] = { . type = NLA_U8 } ,
} ;
struct nlattr * tb [ IFLA_IPTUN_MAX + 1 ] ;
int err ;
NMPObject * obj ;
NMPlatformLnkIpIp * props ;
if ( ! info_data | | g_strcmp0 ( kind , " ipip " ) )
return NULL ;
err = nla_parse_nested ( tb , IFLA_IPTUN_MAX , info_data , policy ) ;
if ( err < 0 )
return NULL ;
obj = nmp_object_new ( NMP_OBJECT_TYPE_LNK_IPIP , NULL ) ;
props = & obj - > lnk_ipip ;
props - > parent_ifindex = tb [ IFLA_IPTUN_LINK ] ? nla_get_u32 ( tb [ IFLA_IPTUN_LINK ] ) : 0 ;
props - > local = tb [ IFLA_IPTUN_LOCAL ] ? nla_get_u32 ( tb [ IFLA_IPTUN_LOCAL ] ) : 0 ;
props - > remote = tb [ IFLA_IPTUN_REMOTE ] ? nla_get_u32 ( tb [ IFLA_IPTUN_REMOTE ] ) : 0 ;
props - > tos = tb [ IFLA_IPTUN_TOS ] ? nla_get_u8 ( tb [ IFLA_IPTUN_TOS ] ) : 0 ;
props - > ttl = tb [ IFLA_IPTUN_TTL ] ? nla_get_u8 ( tb [ IFLA_IPTUN_TTL ] ) : 0 ;
props - > path_mtu_discovery = ! tb [ IFLA_IPTUN_PMTUDISC ] | | ! ! nla_get_u8 ( tb [ IFLA_IPTUN_PMTUDISC ] ) ;
return obj ;
}
/*****************************************************************************/
2015-10-12 15:15:21 +02:00
static NMPObject *
_parse_lnk_macvlan ( const char * kind , struct nlattr * info_data )
{
static struct nla_policy policy [ IFLA_MACVLAN_MAX + 1 ] = {
[ IFLA_MACVLAN_MODE ] = { . type = NLA_U32 } ,
[ IFLA_MACVLAN_FLAGS ] = { . type = NLA_U16 } ,
} ;
NMPlatformLnkMacvlan * props ;
struct nlattr * tb [ IFLA_MACVLAN_MAX + 1 ] ;
int err ;
NMPObject * obj ;
2015-12-04 09:49:39 +01:00
gboolean tap ;
2015-10-12 15:15:21 +02:00
2015-12-04 09:49:39 +01:00
if ( ! info_data )
return NULL ;
if ( ! g_strcmp0 ( kind , " macvlan " ) )
tap = FALSE ;
else if ( ! g_strcmp0 ( kind , " macvtap " ) )
tap = TRUE ;
else
2015-10-12 15:15:21 +02:00
return NULL ;
err = nla_parse_nested ( tb , IFLA_MACVLAN_MAX , info_data , policy ) ;
if ( err < 0 )
return NULL ;
if ( ! tb [ IFLA_MACVLAN_MODE ] )
return NULL ;
2015-12-04 09:49:39 +01:00
obj = nmp_object_new ( tap ? NMP_OBJECT_TYPE_LNK_MACVTAP : NMP_OBJECT_TYPE_LNK_MACVLAN , NULL ) ;
2015-10-12 15:15:21 +02:00
props = & obj - > lnk_macvlan ;
2015-12-03 15:44:33 +01:00
props - > mode = nla_get_u32 ( tb [ IFLA_MACVLAN_MODE ] ) ;
2015-12-04 09:49:39 +01:00
props - > tap = tap ;
2015-10-12 15:15:21 +02:00
if ( tb [ IFLA_MACVLAN_FLAGS ] )
props - > no_promisc = NM_FLAGS_HAS ( nla_get_u16 ( tb [ IFLA_MACVLAN_FLAGS ] ) , MACVLAN_FLAG_NOPROMISC ) ;
return obj ;
}
/*****************************************************************************/
2016-06-30 18:20:09 +02:00
/* Build an NMP_OBJECT_TYPE_LNK_MACSEC object from the IFLA_INFO_DATA of a
 * link of kind "macsec". All attributes are optional; absent ones default
 * to 0/FALSE. The SCI arrives in big-endian byte order and is converted
 * to host order.
 *
 * Returns: a new NMPObject (caller owns the reference), or NULL if @kind
 * does not match or the attribute cannot be parsed. */
static NMPObject *
_parse_lnk_macsec (const char *kind, struct nlattr *info_data)
{
	static struct nla_policy policy[__IFLA_MACSEC_MAX] = {
		[IFLA_MACSEC_SCI]            = { .type = NLA_U64 },
		[IFLA_MACSEC_ICV_LEN]        = { .type = NLA_U8 },
		[IFLA_MACSEC_CIPHER_SUITE]   = { .type = NLA_U64 },
		[IFLA_MACSEC_WINDOW]         = { .type = NLA_U32 },
		[IFLA_MACSEC_ENCODING_SA]    = { .type = NLA_U8 },
		[IFLA_MACSEC_ENCRYPT]        = { .type = NLA_U8 },
		[IFLA_MACSEC_PROTECT]        = { .type = NLA_U8 },
		[IFLA_MACSEC_INC_SCI]        = { .type = NLA_U8 },
		[IFLA_MACSEC_ES]             = { .type = NLA_U8 },
		[IFLA_MACSEC_SCB]            = { .type = NLA_U8 },
		[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
		[IFLA_MACSEC_VALIDATION]     = { .type = NLA_U8 },
	};
	struct nlattr *tb[__IFLA_MACSEC_MAX];
	int err;
	NMPObject *obj;
	NMPlatformLnkMacsec *props;

	if (!info_data || !nm_streq0 (kind, "macsec"))
		return NULL;

	/* maxtype is the highest valid attribute index, i.e. __IFLA_MACSEC_MAX - 1. */
	err = nla_parse_nested (tb, __IFLA_MACSEC_MAX - 1, info_data, policy);
	if (err < 0)
		return NULL;

	obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_MACSEC, NULL);
	props = &obj->lnk_macsec;

	props->sci            = tb[IFLA_MACSEC_SCI]            ? be64toh (nla_get_u64 (tb[IFLA_MACSEC_SCI])) : 0;
	props->icv_length     = tb[IFLA_MACSEC_ICV_LEN]        ? nla_get_u8 (tb[IFLA_MACSEC_ICV_LEN]) : 0;
	props->cipher_suite   = tb[IFLA_MACSEC_CIPHER_SUITE]   ? nla_get_u64 (tb[IFLA_MACSEC_CIPHER_SUITE]) : 0;
	props->window         = tb[IFLA_MACSEC_WINDOW]         ? nla_get_u32 (tb[IFLA_MACSEC_WINDOW]) : 0;
	props->encoding_sa    = tb[IFLA_MACSEC_ENCODING_SA]    ? !!nla_get_u8 (tb[IFLA_MACSEC_ENCODING_SA]) : 0;
	props->encrypt        = tb[IFLA_MACSEC_ENCRYPT]        ? !!nla_get_u8 (tb[IFLA_MACSEC_ENCRYPT]) : 0;
	props->protect        = tb[IFLA_MACSEC_PROTECT]        ? !!nla_get_u8 (tb[IFLA_MACSEC_PROTECT]) : 0;
	props->include_sci    = tb[IFLA_MACSEC_INC_SCI]        ? !!nla_get_u8 (tb[IFLA_MACSEC_INC_SCI]) : 0;
	props->es             = tb[IFLA_MACSEC_ES]             ? !!nla_get_u8 (tb[IFLA_MACSEC_ES]) : 0;
	props->scb            = tb[IFLA_MACSEC_SCB]            ? !!nla_get_u8 (tb[IFLA_MACSEC_SCB]) : 0;
	props->replay_protect = tb[IFLA_MACSEC_REPLAY_PROTECT] ? !!nla_get_u8 (tb[IFLA_MACSEC_REPLAY_PROTECT]) : 0;
	props->validation     = tb[IFLA_MACSEC_VALIDATION]     ? nla_get_u8 (tb[IFLA_MACSEC_VALIDATION]) : 0;

	return obj;
}
/*****************************************************************************/
2015-11-11 18:41:48 +01:00
static NMPObject *
_parse_lnk_sit ( const char * kind , struct nlattr * info_data )
{
static struct nla_policy policy [ IFLA_IPTUN_MAX + 1 ] = {
[ IFLA_IPTUN_LINK ] = { . type = NLA_U32 } ,
[ IFLA_IPTUN_LOCAL ] = { . type = NLA_U32 } ,
[ IFLA_IPTUN_REMOTE ] = { . type = NLA_U32 } ,
[ IFLA_IPTUN_TTL ] = { . type = NLA_U8 } ,
[ IFLA_IPTUN_TOS ] = { . type = NLA_U8 } ,
[ IFLA_IPTUN_PMTUDISC ] = { . type = NLA_U8 } ,
[ IFLA_IPTUN_FLAGS ] = { . type = NLA_U16 } ,
[ IFLA_IPTUN_PROTO ] = { . type = NLA_U8 } ,
} ;
struct nlattr * tb [ IFLA_IPTUN_MAX + 1 ] ;
int err ;
NMPObject * obj ;
NMPlatformLnkSit * props ;
if ( ! info_data | | g_strcmp0 ( kind , " sit " ) )
return NULL ;
err = nla_parse_nested ( tb , IFLA_IPTUN_MAX , info_data , policy ) ;
if ( err < 0 )
return NULL ;
obj = nmp_object_new ( NMP_OBJECT_TYPE_LNK_SIT , NULL ) ;
props = & obj - > lnk_sit ;
props - > parent_ifindex = tb [ IFLA_IPTUN_LINK ] ? nla_get_u32 ( tb [ IFLA_IPTUN_LINK ] ) : 0 ;
props - > local = tb [ IFLA_IPTUN_LOCAL ] ? nla_get_u32 ( tb [ IFLA_IPTUN_LOCAL ] ) : 0 ;
props - > remote = tb [ IFLA_IPTUN_REMOTE ] ? nla_get_u32 ( tb [ IFLA_IPTUN_REMOTE ] ) : 0 ;
props - > tos = tb [ IFLA_IPTUN_TOS ] ? nla_get_u8 ( tb [ IFLA_IPTUN_TOS ] ) : 0 ;
props - > ttl = tb [ IFLA_IPTUN_TTL ] ? nla_get_u8 ( tb [ IFLA_IPTUN_TTL ] ) : 0 ;
props - > path_mtu_discovery = ! tb [ IFLA_IPTUN_PMTUDISC ] | | ! ! nla_get_u8 ( tb [ IFLA_IPTUN_PMTUDISC ] ) ;
props - > flags = tb [ IFLA_IPTUN_FLAGS ] ? nla_get_u16 ( tb [ IFLA_IPTUN_FLAGS ] ) : 0 ;
props - > proto = tb [ IFLA_IPTUN_PROTO ] ? nla_get_u8 ( tb [ IFLA_IPTUN_PROTO ] ) : 0 ;
return obj ;
}
/*****************************************************************************/
2015-10-27 16:14:54 +01:00
/* Convert a nested IFLA_VLAN_INGRESS_QOS/IFLA_VLAN_EGRESS_QOS attribute
 * into a newly allocated, sorted, duplicate-free array of NMVlanQosMapping.
 *
 * @out_map/@out_n_map: must point to NULL/0; on success (and only if the
 *   attribute contained at least one entry) receive the g_new()-allocated
 *   list (caller frees) and its length.
 *
 * Returns: FALSE if any nested entry is too short, TRUE otherwise
 * (including when @nlattr is NULL, which is treated as an empty mapping). */
static gboolean
_vlan_qos_mapping_from_nla (struct nlattr *nlattr,
                            const NMVlanQosMapping **out_map,
                            guint *out_n_map)
{
	struct nlattr *nla;
	int remaining;
	gs_unref_ptrarray GPtrArray *array = NULL;

	/* NMVlanQosMapping must be layout-compatible with the kernel's
	 * struct ifla_vlan_qos_mapping so we can copy entries wholesale. */
	G_STATIC_ASSERT (sizeof (NMVlanQosMapping) == sizeof (struct ifla_vlan_qos_mapping));
	G_STATIC_ASSERT (sizeof (((NMVlanQosMapping *) 0)->to) == sizeof (((struct ifla_vlan_qos_mapping *) 0)->to));
	G_STATIC_ASSERT (sizeof (((NMVlanQosMapping *) 0)->from) == sizeof (((struct ifla_vlan_qos_mapping *) 0)->from));
	G_STATIC_ASSERT (sizeof (NMVlanQosMapping) == sizeof (((NMVlanQosMapping *) 0)->from) + sizeof (((NMVlanQosMapping *) 0)->to));

	nm_assert (out_map && !*out_map);
	nm_assert (out_n_map && !*out_n_map);

	if (!nlattr)
		return TRUE;

	/* First pass: collect pointers into the attribute payload. */
	array = g_ptr_array_new ();
	nla_for_each_nested (nla, nlattr, remaining) {
		if (nla_len (nla) < sizeof (NMVlanQosMapping))
			return FALSE;
		g_ptr_array_add (array, nla_data (nla));
	}

	if (array->len > 0) {
		NMVlanQosMapping *list;
		guint i, j;

		/* The sorting is necessary, because for egress mapping, kernel
		 * doesn't send the items strictly sorted by the from field. */
		g_ptr_array_sort_with_data (array, _vlan_qos_mapping_cmp_from_ptr, NULL);

		list = g_new (NMVlanQosMapping, array->len);

		for (i = 0, j = 0; i < array->len; i++) {
			NMVlanQosMapping *map;

			map = array->pdata[i];

			/* kernel doesn't really send us duplicates. Just be extra cautious
			 * because we want strong guarantees about the sort order and uniqueness
			 * of our mapping list (for simpler equality comparison).
			 * On a duplicate "from", the later entry wins. */
			if (   j > 0
			    && list[j - 1].from == map->from)
				list[j - 1] = *map;
			else
				list[j++] = *map;
		}

		*out_n_map = j;
		*out_map = list;
	}

	return TRUE;
}
2015-10-12 16:07:01 +02:00
/* Copied and heavily modified from libnl3's vlan_parse() */
/* Build an NMP_OBJECT_TYPE_LNK_VLAN object from the IFLA_INFO_DATA of a
 * link of kind "vlan". The VLAN id attribute is mandatory; flags and the
 * ingress/egress QoS mappings are optional.
 *
 * Returns: a new NMPObject (caller owns the reference), or NULL on any
 * parse failure. Note that @obj is held in an nm_auto_nmpobj variable so
 * that early returns release the partially built object; on success the
 * reference is transferred out via @obj_result. */
static NMPObject *
_parse_lnk_vlan (const char *kind, struct nlattr *info_data)
{
	static struct nla_policy policy[IFLA_VLAN_MAX+1] = {
		[IFLA_VLAN_ID]          = { .type = NLA_U16 },
		[IFLA_VLAN_FLAGS]       = { .minlen = nm_offsetofend (struct ifla_vlan_flags, flags) },
		[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
		[IFLA_VLAN_EGRESS_QOS]  = { .type = NLA_NESTED },
		[IFLA_VLAN_PROTOCOL]    = { .type = NLA_U16 },
	};
	struct nlattr *tb[IFLA_VLAN_MAX+1];
	int err;
	nm_auto_nmpobj NMPObject *obj = NULL;
	NMPObject *obj_result;

	if (!info_data || g_strcmp0 (kind, "vlan"))
		return NULL;

	if ((err = nla_parse_nested (tb, IFLA_VLAN_MAX, info_data, policy)) < 0)
		return NULL;

	/* The VLAN id is mandatory. */
	if (!tb[IFLA_VLAN_ID])
		return NULL;

	obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_VLAN, NULL);
	obj->lnk_vlan.id = nla_get_u16 (tb[IFLA_VLAN_ID]);

	if (tb[IFLA_VLAN_FLAGS]) {
		struct ifla_vlan_flags flags;

		nla_memcpy (&flags, tb[IFLA_VLAN_FLAGS], sizeof (flags));

		obj->lnk_vlan.flags = flags.flags;
	}

	if (!_vlan_qos_mapping_from_nla (tb[IFLA_VLAN_INGRESS_QOS],
	                                 &obj->_lnk_vlan.ingress_qos_map,
	                                 &obj->_lnk_vlan.n_ingress_qos_map))
		return NULL;

	if (!_vlan_qos_mapping_from_nla (tb[IFLA_VLAN_EGRESS_QOS],
	                                 &obj->_lnk_vlan.egress_qos_map,
	                                 &obj->_lnk_vlan.n_egress_qos_map))
		return NULL;

	/* Transfer ownership out of the auto-cleanup variable. */
	obj_result = obj;
	obj = NULL;
	return obj_result;
}
/*****************************************************************************/
2015-10-12 15:15:21 +02:00
/* The installed kernel headers might not have VXLAN stuff at all, or
 * they might have the original properties, but not PORT, GROUP6, or LOCAL6.
 * So until we depend on kernel >= 3.11, we just ignore the actual enum
 * in if_link.h and define the values ourselves.
 */
#define IFLA_VXLAN_UNSPEC      0
#define IFLA_VXLAN_ID          1
#define IFLA_VXLAN_GROUP       2
#define IFLA_VXLAN_LINK        3
#define IFLA_VXLAN_LOCAL       4
#define IFLA_VXLAN_TTL         5
#define IFLA_VXLAN_TOS         6
#define IFLA_VXLAN_LEARNING    7
#define IFLA_VXLAN_AGEING      8
#define IFLA_VXLAN_LIMIT       9
#define IFLA_VXLAN_PORT_RANGE 10
#define IFLA_VXLAN_PROXY      11
#define IFLA_VXLAN_RSC        12
#define IFLA_VXLAN_L2MISS     13
#define IFLA_VXLAN_L3MISS     14
#define IFLA_VXLAN_PORT       15
#define IFLA_VXLAN_GROUP6     16
#define IFLA_VXLAN_LOCAL6     17
#undef IFLA_VXLAN_MAX
#define IFLA_VXLAN_MAX IFLA_VXLAN_LOCAL6

/* older kernel header might not contain 'struct ifla_vxlan_port_range'.
 * Redefine it. */
struct nm_ifla_vxlan_port_range {
	guint16 low;
	guint16 high;
};

/* Build an NMP_OBJECT_TYPE_LNK_VXLAN object from the IFLA_INFO_DATA of a
 * link of kind "vxlan". All attributes are optional. Port numbers arrive
 * in network byte order and are converted with ntohs(); the IPv4
 * group/local addresses are kept as-is.
 *
 * Returns: a new NMPObject (caller owns the reference), or NULL if @kind
 * does not match or the attribute cannot be parsed. */
static NMPObject *
_parse_lnk_vxlan (const char *kind, struct nlattr *info_data)
{
	static struct nla_policy policy[IFLA_VXLAN_MAX + 1] = {
		[IFLA_VXLAN_ID]         = { .type = NLA_U32 },
		[IFLA_VXLAN_GROUP]      = { .type = NLA_U32 },
		[IFLA_VXLAN_GROUP6]     = { .type = NLA_UNSPEC,
		                            .minlen = sizeof (struct in6_addr) },
		[IFLA_VXLAN_LINK]       = { .type = NLA_U32 },
		[IFLA_VXLAN_LOCAL]      = { .type = NLA_U32 },
		[IFLA_VXLAN_LOCAL6]     = { .type = NLA_UNSPEC,
		                            .minlen = sizeof (struct in6_addr) },
		[IFLA_VXLAN_TOS]        = { .type = NLA_U8 },
		[IFLA_VXLAN_TTL]        = { .type = NLA_U8 },
		[IFLA_VXLAN_LEARNING]   = { .type = NLA_U8 },
		[IFLA_VXLAN_AGEING]     = { .type = NLA_U32 },
		[IFLA_VXLAN_LIMIT]      = { .type = NLA_U32 },
		[IFLA_VXLAN_PORT_RANGE] = { .type = NLA_UNSPEC,
		                            .minlen = sizeof (struct nm_ifla_vxlan_port_range) },
		[IFLA_VXLAN_PROXY]      = { .type = NLA_U8 },
		[IFLA_VXLAN_RSC]        = { .type = NLA_U8 },
		[IFLA_VXLAN_L2MISS]     = { .type = NLA_U8 },
		[IFLA_VXLAN_L3MISS]     = { .type = NLA_U8 },
		[IFLA_VXLAN_PORT]       = { .type = NLA_U16 },
	};
	NMPlatformLnkVxlan *props;
	struct nlattr *tb[IFLA_VXLAN_MAX + 1];
	struct nm_ifla_vxlan_port_range *range;
	int err;
	NMPObject *obj;

	if (!info_data || g_strcmp0 (kind, "vxlan"))
		return NULL;

	err = nla_parse_nested (tb, IFLA_VXLAN_MAX, info_data, policy);
	if (err < 0)
		return NULL;

	obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_VXLAN, NULL);
	props = &obj->lnk_vxlan;

	if (tb[IFLA_VXLAN_LINK])
		props->parent_ifindex = nla_get_u32 (tb[IFLA_VXLAN_LINK]);
	if (tb[IFLA_VXLAN_ID])
		props->id = nla_get_u32 (tb[IFLA_VXLAN_ID]);
	if (tb[IFLA_VXLAN_GROUP])
		props->group = nla_get_u32 (tb[IFLA_VXLAN_GROUP]);
	if (tb[IFLA_VXLAN_LOCAL])
		props->local = nla_get_u32 (tb[IFLA_VXLAN_LOCAL]);
	if (tb[IFLA_VXLAN_GROUP6])
		memcpy (&props->group6, nla_data (tb[IFLA_VXLAN_GROUP6]), sizeof (props->group6));
	if (tb[IFLA_VXLAN_LOCAL6])
		memcpy (&props->local6, nla_data (tb[IFLA_VXLAN_LOCAL6]), sizeof (props->local6));
	if (tb[IFLA_VXLAN_AGEING])
		props->ageing = nla_get_u32 (tb[IFLA_VXLAN_AGEING]);
	if (tb[IFLA_VXLAN_LIMIT])
		props->limit = nla_get_u32 (tb[IFLA_VXLAN_LIMIT]);
	if (tb[IFLA_VXLAN_TOS])
		props->tos = nla_get_u8 (tb[IFLA_VXLAN_TOS]);
	if (tb[IFLA_VXLAN_TTL])
		props->ttl = nla_get_u8 (tb[IFLA_VXLAN_TTL]);
	if (tb[IFLA_VXLAN_PORT])
		props->dst_port = ntohs (nla_get_u16 (tb[IFLA_VXLAN_PORT]));

	if (tb[IFLA_VXLAN_PORT_RANGE]) {
		range = nla_data (tb[IFLA_VXLAN_PORT_RANGE]);
		props->src_port_min = ntohs (range->low);
		props->src_port_max = ntohs (range->high);
	}

	if (tb[IFLA_VXLAN_LEARNING])
		props->learning = !!nla_get_u8 (tb[IFLA_VXLAN_LEARNING]);
	if (tb[IFLA_VXLAN_PROXY])
		props->proxy = !!nla_get_u8 (tb[IFLA_VXLAN_PROXY]);
	if (tb[IFLA_VXLAN_RSC])
		props->rsc = !!nla_get_u8 (tb[IFLA_VXLAN_RSC]);
	if (tb[IFLA_VXLAN_L2MISS])
		props->l2miss = !!nla_get_u8 (tb[IFLA_VXLAN_L2MISS]);
	if (tb[IFLA_VXLAN_L3MISS])
		props->l3miss = !!nla_get_u8 (tb[IFLA_VXLAN_L3MISS]);

	return obj;
}
/*****************************************************************************/
2015-10-12 16:07:01 +02:00
/* Copied and heavily modified from libnl3's link_msg_parser().
 *
 * Parse a RTM_NEWLINK/RTM_DELLINK netlink message into a newly allocated
 * NMPObject of type link.
 *
 * @platform: the platform instance (used for logging and link-type detection).
 * @cache: the platform cache, or NULL. When given, missing pieces of the
 *   message (hardware address, lnk-data, IPv6 tokens, statistics) may be
 *   completed from the previously cached link object.
 * @nlh: the netlink message header to parse.
 * @id_only: if TRUE, only parse the ifindex and return a minimal object.
 *
 * Returns: the new NMPObject on success, NULL on parse failure.
 *   The caller owns the returned reference. */
static NMPObject *
_new_from_nl_link (NMPlatform *platform, const NMPCache *cache, struct nlmsghdr *nlh, gboolean id_only)
{
	static struct nla_policy policy[IFLA_MAX+1] = {
		[IFLA_IFNAME]           = { .type = NLA_STRING,
		                            .maxlen = IFNAMSIZ },
		[IFLA_MTU]              = { .type = NLA_U32 },
		[IFLA_TXQLEN]           = { .type = NLA_U32 },
		[IFLA_LINK]             = { .type = NLA_U32 },
		[IFLA_WEIGHT]           = { .type = NLA_U32 },
		[IFLA_MASTER]           = { .type = NLA_U32 },
		[IFLA_OPERSTATE]        = { .type = NLA_U8 },
		[IFLA_LINKMODE]         = { .type = NLA_U8 },
		[IFLA_LINKINFO]         = { .type = NLA_NESTED },
		[IFLA_QDISC]            = { .type = NLA_STRING,
		                            .maxlen = IFQDISCSIZ },
		[IFLA_STATS]            = { .minlen = nm_offsetofend (struct rtnl_link_stats, tx_compressed) },
		[IFLA_STATS64]          = { .minlen = nm_offsetofend (struct rtnl_link_stats64, tx_compressed) },
		[IFLA_MAP]              = { .minlen = nm_offsetofend (struct rtnl_link_ifmap, port) },
		[IFLA_IFALIAS]          = { .type = NLA_STRING, .maxlen = IFALIASZ },
		[IFLA_NUM_VF]           = { .type = NLA_U32 },
		[IFLA_AF_SPEC]          = { .type = NLA_NESTED },
		[IFLA_PROMISCUITY]      = { .type = NLA_U32 },
		[IFLA_NUM_TX_QUEUES]    = { .type = NLA_U32 },
		[IFLA_NUM_RX_QUEUES]    = { .type = NLA_U32 },
		[IFLA_GROUP]            = { .type = NLA_U32 },
		[IFLA_CARRIER]          = { .type = NLA_U8 },
		[IFLA_PHYS_PORT_ID]     = { .type = NLA_UNSPEC },
		[IFLA_NET_NS_PID]       = { .type = NLA_U32 },
		[IFLA_NET_NS_FD]        = { .type = NLA_U32 },
	};
	static struct nla_policy policy_link_info[IFLA_INFO_MAX+1] = {
		[IFLA_INFO_KIND]        = { .type = NLA_STRING },
		[IFLA_INFO_DATA]        = { .type = NLA_NESTED },
		[IFLA_INFO_XSTATS]      = { .type = NLA_NESTED },
	};
	const struct ifinfomsg *ifi;
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *li[IFLA_INFO_MAX+1];
	struct nlattr *nl_info_data = NULL;
	const char *nl_info_kind = NULL;
	int err;
	nm_auto_nmpobj NMPObject *obj = NULL;
	NMPObject *obj_result = NULL;
	gboolean completed_from_cache_val = FALSE;
	gboolean *completed_from_cache = cache ? &completed_from_cache_val : NULL;
	const NMPObject *link_cached = NULL;
	NMPObject *lnk_data = NULL;
	gboolean address_complete_from_cache = TRUE;
	gboolean lnk_data_complete_from_cache = TRUE;
	gboolean af_inet6_token_valid = FALSE;
	gboolean af_inet6_addr_gen_mode_valid = FALSE;

	if (!nlmsg_valid_hdr (nlh, sizeof (*ifi)))
		return NULL;
	ifi = nlmsg_data (nlh);

	obj = nmp_object_new_link (ifi->ifi_index);

	/* For id_only, the ifindex is all we need; skip attribute parsing. */
	if (id_only)
		goto id_only_handled;

	err = nlmsg_parse (nlh, sizeof (*ifi), tb, IFLA_MAX, policy);
	if (err < 0)
		goto errout;

	/* A link without a (non-empty) name is not usable to us. */
	if (!tb[IFLA_IFNAME])
		goto errout;
	nla_strlcpy (obj->link.name, tb[IFLA_IFNAME], IFNAMSIZ);
	if (!obj->link.name[0])
		goto errout;

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested (li, IFLA_INFO_MAX, tb[IFLA_LINKINFO], policy_link_info);
		if (err < 0)
			goto errout;

		if (li[IFLA_INFO_KIND])
			nl_info_kind = nla_get_string (li[IFLA_INFO_KIND]);

		nl_info_data = li[IFLA_INFO_DATA];
	}

	if (tb[IFLA_STATS64]) {
		/* tb[IFLA_STATS64] is only guaranteed to be 32bit-aligned,
		 * so in general we can't access the rtnl_link_stats64 struct
		 * members directly on 64 bit architectures. */
		char *stats = nla_data (tb[IFLA_STATS64]);

#define READ_STAT64(member) \
	unaligned_read_ne64 (stats + offsetof (struct rtnl_link_stats64, member))

		obj->link.rx_packets = READ_STAT64 (rx_packets);
		obj->link.rx_bytes   = READ_STAT64 (rx_bytes);
		obj->link.tx_packets = READ_STAT64 (tx_packets);
		obj->link.tx_bytes   = READ_STAT64 (tx_bytes);
	}

	obj->link.n_ifi_flags = ifi->ifi_flags;
	/* "connected" is derived from IFF_LOWER_UP, not IFF_UP. */
	obj->link.connected = NM_FLAGS_HAS (obj->link.n_ifi_flags, IFF_LOWER_UP);
	obj->link.arptype = ifi->ifi_type;

	obj->link.type = _linktype_get_type (platform,
	                                     cache,
	                                     nl_info_kind,
	                                     obj->link.ifindex,
	                                     obj->link.name,
	                                     obj->link.n_ifi_flags,
	                                     obj->link.arptype,
	                                     completed_from_cache,
	                                     &link_cached,
	                                     &obj->link.kind);

	if (tb[IFLA_MASTER])
		obj->link.master = nla_get_u32 (tb[IFLA_MASTER]);

	if (tb[IFLA_LINK]) {
		/* If IFLA_LINK_NETNSID is present, the parent lives in another
		 * network namespace and its ifindex is not meaningful here. */
		if (!tb[IFLA_LINK_NETNSID])
			obj->link.parent = nla_get_u32 (tb[IFLA_LINK]);
		else
			obj->link.parent = NM_PLATFORM_LINK_OTHER_NETNS;
	}

	if (tb[IFLA_ADDRESS]) {
		int l = nla_len (tb[IFLA_ADDRESS]);

		if (l > 0 && l <= NM_UTILS_HWADDR_LEN_MAX) {
			G_STATIC_ASSERT (NM_UTILS_HWADDR_LEN_MAX == sizeof (obj->link.addr.data));
			memcpy (obj->link.addr.data, nla_data (tb[IFLA_ADDRESS]), l);
			obj->link.addr.len = l;
		}
		address_complete_from_cache = FALSE;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af_attr;
		int remaining;

		nla_for_each_nested (af_attr, tb[IFLA_AF_SPEC], remaining) {
			switch (nla_type (af_attr)) {
			case AF_INET6:
				_parse_af_inet6 (platform,
				                 af_attr,
				                 &obj->link.inet6_token,
				                 &af_inet6_token_valid,
				                 &obj->link.inet6_addr_gen_mode_inv,
				                 &af_inet6_addr_gen_mode_valid);
				break;
			}
		}
	}

	if (tb[IFLA_MTU])
		obj->link.mtu = nla_get_u32 (tb[IFLA_MTU]);

	/* Parse the type-specific IFLA_INFO_DATA into a separate lnk object. */
	switch (obj->link.type) {
	case NM_LINK_TYPE_GRE:
		lnk_data = _parse_lnk_gre (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_INFINIBAND:
		lnk_data = _parse_lnk_infiniband (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_IP6TNL:
		lnk_data = _parse_lnk_ip6tnl (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_IPIP:
		lnk_data = _parse_lnk_ipip (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_MACSEC:
		lnk_data = _parse_lnk_macsec (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_MACVLAN:
	case NM_LINK_TYPE_MACVTAP:
		lnk_data = _parse_lnk_macvlan (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_SIT:
		lnk_data = _parse_lnk_sit (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_VLAN:
		lnk_data = _parse_lnk_vlan (nl_info_kind, nl_info_data);
		break;
	case NM_LINK_TYPE_VXLAN:
		lnk_data = _parse_lnk_vxlan (nl_info_kind, nl_info_data);
		break;
	default:
		lnk_data_complete_from_cache = FALSE;
		break;
	}

	/* Complete missing fields from the cached link, if the cache has one
	 * and the message left something out. */
	if (   completed_from_cache
	    && (   lnk_data_complete_from_cache
	        || address_complete_from_cache
	        || !af_inet6_token_valid
	        || !af_inet6_addr_gen_mode_valid
	        || !tb[IFLA_STATS64])) {
		_lookup_cached_link (cache, obj->link.ifindex, completed_from_cache, &link_cached);
		if (link_cached) {
			if (   lnk_data_complete_from_cache
			    && link_cached->link.type == obj->link.type
			    && link_cached->_link.netlink.lnk
			    && (   !lnk_data
			        || nmp_object_equal (lnk_data, link_cached->_link.netlink.lnk))) {
				/* We always try to look into the cache and reuse the object there.
				 * We do that, because we consider the lnk object as immutable and don't
				 * modify it after creating. Hence we can share it and reuse.
				 *
				 * Also, sometimes the info-data is missing for updates. In this case
				 * we want to keep the previously received lnk_data. */
				nmp_object_unref (lnk_data);
				lnk_data = nmp_object_ref (link_cached->_link.netlink.lnk);
			}
			if (address_complete_from_cache)
				obj->link.addr = link_cached->link.addr;
			if (!af_inet6_token_valid)
				obj->link.inet6_token = link_cached->link.inet6_token;
			if (!af_inet6_addr_gen_mode_valid)
				obj->link.inet6_addr_gen_mode_inv = link_cached->link.inet6_addr_gen_mode_inv;
			if (!tb[IFLA_STATS64]) {
				obj->link.rx_packets = link_cached->link.rx_packets;
				obj->link.rx_bytes   = link_cached->link.rx_bytes;
				obj->link.tx_packets = link_cached->link.tx_packets;
				obj->link.tx_bytes   = link_cached->link.tx_bytes;
			}
		}
	}

	/* Ownership of lnk_data (if any) transfers to obj. */
	obj->_link.netlink.lnk = lnk_data;
	obj->_link.netlink.is_in_netlink = TRUE;
id_only_handled:
	/* Steal the reference from the auto-cleanup pointer. */
	obj_result = obj;
	obj = NULL;
errout:
	return obj_result;
}
/* Copied and heavily modified from libnl3's addr_msg_parser().
 *
 * Parse a RTM_NEWADDR/RTM_DELADDR netlink message into a newly allocated
 * NMPObject of type IPv4- or IPv6-address.
 *
 * @nlh: the netlink message header to parse.
 * @id_only: currently unused here; kept for symmetry with the other
 *   _new_from_nl_*() parsers.
 *
 * Returns: the new NMPObject on success, NULL on parse failure.
 *   The caller owns the returned reference. */
static NMPObject *
_new_from_nl_addr (struct nlmsghdr *nlh, gboolean id_only)
{
	static struct nla_policy policy[IFA_MAX+1] = {
		[IFA_LABEL]     = { .type = NLA_STRING,
		                    .maxlen = IFNAMSIZ },
		[IFA_CACHEINFO] = { .minlen = nm_offsetofend (struct ifa_cacheinfo, tstamp) },
	};
	const struct ifaddrmsg *ifa;
	struct nlattr *tb[IFA_MAX+1];
	int err;
	gboolean is_v4;
	nm_auto_nmpobj NMPObject *obj = NULL;
	NMPObject *obj_result = NULL;
	int addr_len;
	guint32 lifetime, preferred, timestamp;

	if (!nlmsg_valid_hdr (nlh, sizeof (*ifa)))
		return NULL;
	ifa = nlmsg_data (nlh);

	if (!NM_IN_SET (ifa->ifa_family, AF_INET, AF_INET6))
		goto errout;
	is_v4 = ifa->ifa_family == AF_INET;

	err = nlmsg_parse (nlh, sizeof (*ifa), tb, IFA_MAX, policy);
	if (err < 0)
		goto errout;

	addr_len = is_v4
	           ? sizeof (in_addr_t)
	           : sizeof (struct in6_addr);

	/* Reject out-of-range prefix lengths (max 32 for IPv4, 128 for IPv6). */
	if (ifa->ifa_prefixlen > (is_v4 ? 32 : 128))
		goto errout;

	/*****************************************************************/

	obj = nmp_object_new (is_v4 ? NMP_OBJECT_TYPE_IP4_ADDRESS : NMP_OBJECT_TYPE_IP6_ADDRESS, NULL);

	obj->ip_address.ifindex = ifa->ifa_index;
	obj->ip_address.plen = ifa->ifa_prefixlen;

	_check_addr_or_errout (tb, IFA_ADDRESS, addr_len);
	_check_addr_or_errout (tb, IFA_LOCAL, addr_len);
	if (is_v4) {
		/* For IPv4, kernel omits IFA_LOCAL/IFA_ADDRESS if (and only if) they
		 * are effectively 0.0.0.0 (all-zero). */
		if (tb[IFA_LOCAL])
			memcpy (&obj->ip4_address.address, nla_data (tb[IFA_LOCAL]), addr_len);
		if (tb[IFA_ADDRESS])
			memcpy (&obj->ip4_address.peer_address, nla_data (tb[IFA_ADDRESS]), addr_len);
	} else {
		/* For IPv6, IFA_ADDRESS is always present.
		 *
		 * If IFA_LOCAL is missing, IFA_ADDRESS is @address and @peer_address
		 * is :: (all-zero).
		 *
		 * If unexpectely IFA_ADDRESS is missing, make the best of it -- but it _should_
		 * actually be there. */
		if (tb[IFA_ADDRESS] || tb[IFA_LOCAL]) {
			if (tb[IFA_LOCAL]) {
				memcpy (&obj->ip6_address.address, nla_data (tb[IFA_LOCAL]), addr_len);
				if (tb[IFA_ADDRESS])
					memcpy (&obj->ip6_address.peer_address, nla_data (tb[IFA_ADDRESS]), addr_len);
				else
					obj->ip6_address.peer_address = obj->ip6_address.address;
			} else
				memcpy (&obj->ip6_address.address, nla_data (tb[IFA_ADDRESS]), addr_len);
		}
	}

	obj->ip_address.addr_source = NM_IP_CONFIG_SOURCE_KERNEL;

	/* IFA_FLAGS (u32) supersedes the narrower ifa_flags header field
	 * when the kernel provides it. */
	obj->ip_address.n_ifa_flags = tb[IFA_FLAGS]
	                              ? nla_get_u32 (tb[IFA_FLAGS])
	                              : ifa->ifa_flags;

	if (is_v4) {
		if (tb[IFA_LABEL]) {
			char label[IFNAMSIZ];

			nla_strlcpy (label, tb[IFA_LABEL], IFNAMSIZ);

			/* Check for ':'; we're only interested in labels used as interface aliases */
			if (strchr (label, ':'))
				g_strlcpy (obj->ip4_address.label, label, sizeof (obj->ip4_address.label));
		}
	}

	lifetime = NM_PLATFORM_LIFETIME_PERMANENT;
	preferred = NM_PLATFORM_LIFETIME_PERMANENT;
	timestamp = 0;
	/* IPv6 only */
	if (tb[IFA_CACHEINFO]) {
		const struct ifa_cacheinfo *ca = nla_data (tb[IFA_CACHEINFO]);

		lifetime = ca->ifa_valid;
		preferred = ca->ifa_prefered;
		timestamp = ca->tstamp;
	}
	_addrtime_get_lifetimes (timestamp,
	                         lifetime,
	                         preferred,
	                         &obj->ip_address.timestamp,
	                         &obj->ip_address.lifetime,
	                         &obj->ip_address.preferred);

	/* Steal the reference from the auto-cleanup pointer. */
	obj_result = obj;
	obj = NULL;
errout:
	return obj_result;
}
2015-10-12 16:07:01 +02:00
/* Parse an RTM_{NEW,DEL,GET}ROUTE netlink message into a newly allocated
 * NMPObject (IP4 or IP6 route), or return NULL if the message is not a
 * "normal" unicast main-table route that we track.
 *
 * Copied and heavily modified from libnl3's rtnl_route_parse() and parse_multipath(). */
static NMPObject *
_new_from_nl_route (struct nlmsghdr *nlh, gboolean id_only)
{
	static struct nla_policy policy[RTA_MAX+1] = {
		[RTA_IIF]       = { .type = NLA_U32 },
		[RTA_OIF]       = { .type = NLA_U32 },
		[RTA_PRIORITY]  = { .type = NLA_U32 },
		[RTA_FLOW]      = { .type = NLA_U32 },
		[RTA_CACHEINFO] = { .minlen = nm_offsetofend (struct rta_cacheinfo, rta_tsage) },
		[RTA_METRICS]   = { .type = NLA_NESTED },
		[RTA_MULTIPATH] = { .type = NLA_NESTED },
	};
	const struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX + 1];
	int err;
	gboolean is_v4;
	nm_auto_nmpobj NMPObject *obj = NULL;
	NMPObject *obj_result = NULL;
	int addr_len;
	struct {
		gboolean is_present;
		int ifindex;
		NMIPAddr gateway;
	} nh;
	guint32 mss;
	guint32 table;

	if (!nlmsg_valid_hdr (nlh, sizeof (*rtm)))
		return NULL;
	rtm = nlmsg_data (nlh);

	/*****************************************************************
	 * only handle ~normal~ routes.
	 *****************************************************************/

	if (!NM_IN_SET (rtm->rtm_family, AF_INET, AF_INET6))
		goto errout;

	if (   rtm->rtm_type != RTN_UNICAST
	    || rtm->rtm_tos != 0)
		goto errout;

	err = nlmsg_parse (nlh, sizeof (struct rtmsg), tb, RTA_MAX, policy);
	if (err < 0)
		goto errout;

	/* RTA_TABLE (if present) supersedes the legacy 8-bit rtm_table field. */
	table = tb[RTA_TABLE]
	        ? nla_get_u32 (tb[RTA_TABLE])
	        : (guint32) rtm->rtm_table;
	if (table != RT_TABLE_MAIN)
		goto errout;

	/*****************************************************************/

	is_v4 = rtm->rtm_family == AF_INET;
	addr_len = is_v4
	           ? sizeof (in_addr_t)
	           : sizeof (struct in6_addr);

	if (rtm->rtm_dst_len > (is_v4 ? 32 : 128))
		goto errout;

	/*****************************************************************
	 * parse nexthops. Only handle routes with one nh.
	 *****************************************************************/

	memset (&nh, 0, sizeof (nh));

	if (tb[RTA_MULTIPATH]) {
		struct rtnexthop *rtnh = nla_data (tb[RTA_MULTIPATH]);
		size_t tlen = nla_len (tb[RTA_MULTIPATH]);

		while (tlen >= sizeof (*rtnh) && tlen >= rtnh->rtnh_len) {

			if (nh.is_present) {
				/* we don't support multipath routes. */
				goto errout;
			}
			nh.is_present = TRUE;

			nh.ifindex = rtnh->rtnh_ifindex;

			if (rtnh->rtnh_len > sizeof (*rtnh)) {
				struct nlattr *ntb[RTA_MAX + 1];

				err = nla_parse (ntb, RTA_MAX, (struct nlattr *)
				                 RTNH_DATA (rtnh),
				                 rtnh->rtnh_len - sizeof (*rtnh),
				                 policy);
				if (err < 0)
					goto errout;

				if (_check_addr_or_errout (ntb, RTA_GATEWAY, addr_len))
					memcpy (&nh.gateway, nla_data (ntb[RTA_GATEWAY]), addr_len);
			}

			tlen -= RTNH_ALIGN (rtnh->rtnh_len);
			rtnh = RTNH_NEXT (rtnh);
		}
	}

	if (   tb[RTA_OIF]
	    || tb[RTA_GATEWAY]
	    || tb[RTA_FLOW]) {
		int ifindex = 0;
		NMIPAddr gateway = NMIPAddrInit;

		if (tb[RTA_OIF])
			ifindex = nla_get_u32 (tb[RTA_OIF]);
		if (_check_addr_or_errout (tb, RTA_GATEWAY, addr_len))
			memcpy (&gateway, nla_data (tb[RTA_GATEWAY]), addr_len);

		if (!nh.is_present) {
			/* If no nexthops have been provided via RTA_MULTIPATH
			 * we add it as regular nexthop to maintain backwards
			 * compatibility */
			nh.ifindex = ifindex;
			nh.gateway = gateway;
		} else {
			/* Kernel supports new style nexthop configuration,
			 * verify that it is a duplicate and ignore old-style nexthop. */
			if (   nh.ifindex != ifindex
			    || memcmp (&nh.gateway, &gateway, addr_len) != 0)
				goto errout;
		}
	} else if (!nh.is_present)
		goto errout;

	/*****************************************************************/

	mss = 0;
	if (tb[RTA_METRICS]) {
		struct nlattr *mtb[RTAX_MAX + 1];
		int i;

		err = nla_parse_nested (mtb, RTAX_MAX, tb[RTA_METRICS], NULL);
		if (err < 0)
			goto errout;

		/* we only care about the advertised MSS metric. */
		for (i = 1; i <= RTAX_MAX; i++) {
			if (mtb[i]) {
				if (i == RTAX_ADVMSS) {
					if (nla_len (mtb[i]) >= sizeof (uint32_t))
						mss = nla_get_u32 (mtb[i]);
					break;
				}
			}
		}
	}

	/*****************************************************************/

	obj = nmp_object_new (is_v4 ? NMP_OBJECT_TYPE_IP4_ROUTE : NMP_OBJECT_TYPE_IP6_ROUTE, NULL);

	obj->ip_route.ifindex = nh.ifindex;

	if (_check_addr_or_errout (tb, RTA_DST, addr_len))
		memcpy (obj->ip_route.network_ptr, nla_data (tb[RTA_DST]), addr_len);

	obj->ip_route.plen = rtm->rtm_dst_len;

	if (tb[RTA_PRIORITY])
		obj->ip_route.metric = nla_get_u32 (tb[RTA_PRIORITY]);

	if (is_v4)
		obj->ip4_route.gateway = nh.gateway.addr4;
	else
		obj->ip6_route.gateway = nh.gateway.addr6;

	if (is_v4)
		obj->ip4_route.scope_inv = nm_platform_route_scope_inv (rtm->rtm_scope);

	if (is_v4) {
		if (_check_addr_or_errout (tb, RTA_PREFSRC, addr_len))
			memcpy (&obj->ip4_route.pref_src, nla_data (tb[RTA_PREFSRC]), addr_len);
	}

	obj->ip_route.mss = mss;

	if (NM_FLAGS_HAS (rtm->rtm_flags, RTM_F_CLONED)) {
		/* we must not straight way reject cloned routes, because we might have cached
		 * a non-cloned route. If we now receive an update of the route with the route
		 * being cloned, we must still return the object, so that we can remove the old
		 * one from the cache.
		 *
		 * This happens, because this route is not nmp_object_is_alive().
		 * */
		obj->ip_route.rt_cloned = TRUE;
	}

	obj->ip_route.rt_source = nmp_utils_ip_config_source_from_rtprot (rtm->rtm_protocol);

	obj_result = obj;
	obj = NULL;
errout:
	return obj_result;
}
/**
 * nmp_object_new_from_nl:
 * @platform: (allow-none): for creating certain objects, the constructor wants to check
 *   sysfs. For this the platform instance is needed. If missing, the object might not
 *   be correctly detected.
 * @cache: (allow-none): for certain objects, the netlink message doesn't contain all the information.
 *   If a cache is given, the object is completed with information from the cache.
 * @msg: the netlink message (must be a NETLINK_ROUTE message)
 * @id_only: whether only to create an empty object with only the ID fields set.
 *
 * Dispatches on the message type to the link/address/route constructors.
 *
 * Returns: %NULL or a newly created NMPObject instance.
 **/
static NMPObject *
nmp_object_new_from_nl (NMPlatform *platform, const NMPCache *cache, struct nl_msg *msg, gboolean id_only)
{
	struct nlmsghdr *msghdr;

	if (nlmsg_get_proto (msg) != NETLINK_ROUTE)
		return NULL;

	msghdr = nlmsg_hdr (msg);

	switch (msghdr->nlmsg_type) {
	case RTM_NEWLINK:
	case RTM_DELLINK:
	case RTM_GETLINK:
	case RTM_SETLINK:
		return _new_from_nl_link (platform, cache, msghdr, id_only);
	case RTM_NEWADDR:
	case RTM_DELADDR:
	case RTM_GETADDR:
		return _new_from_nl_addr (msghdr, id_only);
	case RTM_NEWROUTE:
	case RTM_DELROUTE:
	case RTM_GETROUTE:
		return _new_from_nl_route (msghdr, id_only);
	default:
		return NULL;
	}
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-10-12 16:07:01 +02:00
2015-10-20 09:27:16 +02:00
/* Append an IFLA_AF_SPEC section to a RTM_*LINK message, optionally setting
 * the IPv6 address-generation mode and/or the IPv6 interface token.
 *
 * Returns: TRUE on success, FALSE if appending an attribute failed. */
static gboolean
_nl_msg_new_link_set_afspec (struct nl_msg *msg,
                             int addr_gen_mode,
                             NMUtilsIPv6IfaceId *iid)
{
	struct nlattr *af_spec;
	struct nlattr *af_attr;

	nm_assert (msg);

	if (!(af_spec = nla_nest_start (msg, IFLA_AF_SPEC)))
		goto nla_put_failure;

	/* a negative addr_gen_mode means "don't set". */
	if (addr_gen_mode >= 0 || iid) {
		if (!(af_attr = nla_nest_start (msg, AF_INET6)))
			goto nla_put_failure;

		if (addr_gen_mode >= 0)
			NLA_PUT_U8 (msg, IFLA_INET6_ADDR_GEN_MODE, addr_gen_mode);

		if (iid) {
			struct in6_addr i6_token = { .s6_addr = { 0, } };

			nm_utils_ipv6_addr_set_interface_identifier (&i6_token, *iid);
			NLA_PUT (msg, IFLA_INET6_TOKEN, sizeof (struct in6_addr), &i6_token);
		}

		nla_nest_end (msg, af_attr);
	}

	nla_nest_end (msg, af_spec);

	return TRUE;

nla_put_failure:
	return FALSE;
}
/* Append an IFLA_LINKINFO section carrying the rtnl "kind" string for
 * @link_type to a RTM_*LINK message.
 *
 * Returns: TRUE on success, FALSE if the link type has no rtnl kind or
 * appending an attribute failed. */
static gboolean
_nl_msg_new_link_set_linkinfo (struct nl_msg *msg,
                               NMLinkType link_type)
{
	struct nlattr *info;
	const char *kind;

	nm_assert (msg);

	kind = nm_link_type_to_rtnl_type_string (link_type);
	if (!kind)
		goto nla_put_failure;

	if (!(info = nla_nest_start (msg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (msg, IFLA_INFO_KIND, kind);

	nla_nest_end (msg, info);

	return TRUE;

nla_put_failure:
	return FALSE;
}
static gboolean
_nl_msg_new_link_set_linkinfo_vlan ( struct nl_msg * msg ,
int vlan_id ,
guint32 flags_mask ,
guint32 flags_set ,
2015-10-27 16:14:54 +01:00
const NMVlanQosMapping * ingress_qos ,
2015-10-20 09:27:16 +02:00
int ingress_qos_len ,
2015-10-27 16:14:54 +01:00
const NMVlanQosMapping * egress_qos ,
2015-10-20 09:27:16 +02:00
int egress_qos_len )
{
struct nlattr * info ;
struct nlattr * data ;
guint i ;
2015-10-27 16:14:54 +01:00
gboolean has_any_vlan_properties = FALSE ;
# define VLAN_XGRESS_PRIO_VALID(from) (((from) & ~(guint32) 0x07) == 0)
2015-10-20 09:27:16 +02:00
nm_assert ( msg ) ;
2015-10-27 16:14:54 +01:00
/* We must not create an empty IFLA_LINKINFO section. Otherwise, kernel
* rejects the request as invalid . */
if ( flags_mask ! = 0
| | vlan_id > = 0 )
has_any_vlan_properties = TRUE ;
if ( ! has_any_vlan_properties
& & ingress_qos & & ingress_qos_len > 0 ) {
for ( i = 0 ; i < ingress_qos_len ; i + + ) {
if ( VLAN_XGRESS_PRIO_VALID ( ingress_qos [ i ] . from ) ) {
has_any_vlan_properties = TRUE ;
break ;
}
}
}
if ( ! has_any_vlan_properties
& & egress_qos & & egress_qos_len > 0 ) {
for ( i = 0 ; i < egress_qos_len ; i + + ) {
if ( VLAN_XGRESS_PRIO_VALID ( egress_qos [ i ] . to ) ) {
has_any_vlan_properties = TRUE ;
break ;
}
}
}
if ( ! has_any_vlan_properties )
return TRUE ;
2015-10-20 09:27:16 +02:00
if ( ! ( info = nla_nest_start ( msg , IFLA_LINKINFO ) ) )
goto nla_put_failure ;
NLA_PUT_STRING ( msg , IFLA_INFO_KIND , " vlan " ) ;
if ( ! ( data = nla_nest_start ( msg , IFLA_INFO_DATA ) ) )
goto nla_put_failure ;
if ( vlan_id > = 0 )
NLA_PUT_U16 ( msg , IFLA_VLAN_ID , vlan_id ) ;
if ( flags_mask ! = 0 ) {
struct ifla_vlan_flags flags = {
. flags = flags_mask & flags_set ,
. mask = flags_mask ,
} ;
NLA_PUT ( msg , IFLA_VLAN_FLAGS , sizeof ( flags ) , & flags ) ;
}
if ( ingress_qos & & ingress_qos_len > 0 ) {
2015-10-27 16:14:54 +01:00
struct nlattr * qos = NULL ;
for ( i = 0 ; i < ingress_qos_len ; i + + ) {
/* Silently ignore invalid mappings. Kernel would truncate
* them and modify the wrong mapping . */
if ( VLAN_XGRESS_PRIO_VALID ( ingress_qos [ i ] . from ) ) {
if ( ! qos ) {
if ( ! ( qos = nla_nest_start ( msg , IFLA_VLAN_INGRESS_QOS ) ) )
goto nla_put_failure ;
}
NLA_PUT ( msg , i , sizeof ( ingress_qos [ i ] ) , & ingress_qos [ i ] ) ;
}
}
2015-10-20 09:27:16 +02:00
2015-10-27 16:14:54 +01:00
if ( qos )
nla_nest_end ( msg , qos ) ;
2015-10-20 09:27:16 +02:00
}
if ( egress_qos & & egress_qos_len > 0 ) {
2015-10-27 16:14:54 +01:00
struct nlattr * qos = NULL ;
2015-10-20 09:27:16 +02:00
2015-10-27 16:14:54 +01:00
for ( i = 0 ; i < egress_qos_len ; i + + ) {
if ( VLAN_XGRESS_PRIO_VALID ( egress_qos [ i ] . to ) ) {
if ( ! qos ) {
if ( ! ( qos = nla_nest_start ( msg , IFLA_VLAN_EGRESS_QOS ) ) )
goto nla_put_failure ;
}
NLA_PUT ( msg , i , sizeof ( egress_qos [ i ] ) , & egress_qos [ i ] ) ;
}
}
2015-10-20 09:27:16 +02:00
2015-10-27 16:14:54 +01:00
if ( qos )
nla_nest_end ( msg , qos ) ;
2015-10-20 09:27:16 +02:00
}
nla_nest_end ( msg , data ) ;
nla_nest_end ( msg , info ) ;
return TRUE ;
nla_put_failure :
return FALSE ;
}
static struct nl_msg *
_nl_msg_new_link ( int nlmsg_type ,
int nlmsg_flags ,
int ifindex ,
const char * ifname ,
2015-11-02 14:27:22 +01:00
unsigned flags_mask ,
unsigned flags_set )
2015-10-20 09:27:16 +02:00
{
struct nl_msg * msg ;
struct ifinfomsg ifi = {
2015-11-02 14:27:22 +01:00
. ifi_change = flags_mask ,
. ifi_flags = flags_set ,
2015-10-20 09:27:16 +02:00
. ifi_index = ifindex ,
} ;
nm_assert ( NM_IN_SET ( nlmsg_type , RTM_DELLINK , RTM_NEWLINK , RTM_GETLINK ) ) ;
if ( ! ( msg = nlmsg_alloc_simple ( nlmsg_type , nlmsg_flags ) ) )
g_return_val_if_reached ( NULL ) ;
if ( nlmsg_append ( msg , & ifi , sizeof ( ifi ) , NLMSG_ALIGNTO ) < 0 )
goto nla_put_failure ;
if ( ifname )
NLA_PUT_STRING ( msg , IFLA_IFNAME , ifname ) ;
return msg ;
nla_put_failure :
nlmsg_free ( msg ) ;
g_return_val_if_reached ( NULL ) ;
}
/* Copied and modified from libnl3's build_addr_msg(). */
static struct nl_msg *
_nl_msg_new_address ( int nlmsg_type ,
int nlmsg_flags ,
int family ,
int ifindex ,
gconstpointer address ,
2016-04-06 18:04:26 +02:00
guint8 plen ,
2015-10-20 09:27:16 +02:00
gconstpointer peer_address ,
guint32 flags ,
int scope ,
guint32 lifetime ,
guint32 preferred ,
const char * label )
{
struct nl_msg * msg ;
struct ifaddrmsg am = {
. ifa_family = family ,
. ifa_index = ifindex ,
. ifa_prefixlen = plen ,
. ifa_flags = flags ,
} ;
gsize addr_len ;
nm_assert ( NM_IN_SET ( family , AF_INET , AF_INET6 ) ) ;
nm_assert ( NM_IN_SET ( nlmsg_type , RTM_NEWADDR , RTM_DELADDR ) ) ;
msg = nlmsg_alloc_simple ( nlmsg_type , nlmsg_flags ) ;
if ( ! msg )
g_return_val_if_reached ( NULL ) ;
if ( scope = = - 1 ) {
/* Allow having scope unset, and detect the scope (including IPv4 compatibility hack). */
if ( family = = AF_INET
& & address
& & * ( ( char * ) address ) = = 127 )
scope = RT_SCOPE_HOST ;
else
scope = RT_SCOPE_UNIVERSE ;
}
am . ifa_scope = scope ,
addr_len = family = = AF_INET ? sizeof ( in_addr_t ) : sizeof ( struct in6_addr ) ;
if ( nlmsg_append ( msg , & am , sizeof ( am ) , NLMSG_ALIGNTO ) < 0 )
goto nla_put_failure ;
if ( address )
NLA_PUT ( msg , IFA_LOCAL , addr_len , address ) ;
if ( peer_address )
NLA_PUT ( msg , IFA_ADDRESS , addr_len , peer_address ) ;
else if ( address )
NLA_PUT ( msg , IFA_ADDRESS , addr_len , address ) ;
if ( label & & label [ 0 ] )
NLA_PUT_STRING ( msg , IFA_LABEL , label ) ;
if ( family = = AF_INET
& & nlmsg_type ! = RTM_DELADDR
& & address
& & * ( ( in_addr_t * ) address ) ! = 0 ) {
in_addr_t broadcast ;
broadcast = * ( ( in_addr_t * ) address ) | ~ nm_utils_ip4_prefix_to_netmask ( plen ) ;
NLA_PUT ( msg , IFA_BROADCAST , addr_len , & broadcast ) ;
}
if ( lifetime ! = NM_PLATFORM_LIFETIME_PERMANENT
| | preferred ! = NM_PLATFORM_LIFETIME_PERMANENT ) {
struct ifa_cacheinfo ca = {
. ifa_valid = lifetime ,
. ifa_prefered = preferred ,
} ;
NLA_PUT ( msg , IFA_CACHEINFO , sizeof ( ca ) , & ca ) ;
}
2016-02-29 17:06:21 +01:00
if ( flags & ~ ( ( guint32 ) 0xFF ) ) {
2015-10-20 09:27:16 +02:00
/* only set the IFA_FLAGS attribute, if they actually contain additional
* flags that are not already set to am . ifa_flags .
*
* Older kernels refuse RTM_NEWADDR and RTM_NEWROUTE messages with EINVAL
* if they contain unknown netlink attributes . See net / core / rtnetlink . c , which
* was fixed by kernel commit 661 d2967b3f1b34eeaa7e212e7b9bbe8ee072b59 . */
NLA_PUT_U32 ( msg , IFA_FLAGS , flags ) ;
}
return msg ;
nla_put_failure :
nlmsg_free ( msg ) ;
g_return_val_if_reached ( NULL ) ;
}
/* Copied and modified from libnl3's build_route_msg() and rtnl_route_build_msg(). */
static struct nl_msg *
_nl_msg_new_route ( int nlmsg_type ,
int nlmsg_flags ,
int family ,
int ifindex ,
NMIPConfigSource source ,
unsigned char scope ,
gconstpointer network ,
2016-04-06 14:19:05 +02:00
guint8 plen ,
2015-10-20 09:27:16 +02:00
gconstpointer gateway ,
guint32 metric ,
guint32 mss ,
gconstpointer pref_src )
{
struct nl_msg * msg ;
struct rtmsg rtmsg = {
. rtm_family = family ,
. rtm_tos = 0 ,
. rtm_table = RT_TABLE_MAIN , /* omit setting RTA_TABLE attribute */
platform: extend NMIPConfigSource to preserve the rtm_protocol field
For addresses (NMPlatformIPAddress) the @addr_source field is ignored
on a platform level. That is, all addresses inside the platform cache
have this value set to NM_IP_CONFIG_SOURCE_KERNEL. Maybe, for that reason,
the source should not be a part of the NMPlatformIPAddress structure, but
it is convenient for users to piggy back the source inside the platform
address structure.
For routes, the source is stored in NMPlatformIPRoute's @rt_source
field. When adding a route to kernel, we set the @rtm_protocol of the
route depending on the source. However, we want to map different source
values to the same protocol value.
On the other hand, when kernel sends us a route that gets put inside
the cache, we must preserve the protocol value and must not map
different protocol values to the same source.
The reason is, that a user can add two routes that only differ by
@rtm_protocol. In that sense, the @rtm_protocol fields is part of the
unique ID of a kernel route, and thus different values must map to
different sources.
Fix this, by extending the range of NMIPConfigSource to contain
a range of protocol fields.
2016-04-11 17:35:29 +02:00
. rtm_protocol = nmp_utils_ip_config_source_coerce_to_rtprot ( source ) ,
2015-10-20 09:27:16 +02:00
. rtm_scope = scope ,
. rtm_type = RTN_UNICAST ,
. rtm_flags = 0 ,
. rtm_dst_len = plen ,
. rtm_src_len = 0 ,
} ;
NMIPAddr network_clean ;
gsize addr_len ;
nm_assert ( NM_IN_SET ( family , AF_INET , AF_INET6 ) ) ;
nm_assert ( NM_IN_SET ( nlmsg_type , RTM_NEWROUTE , RTM_DELROUTE ) ) ;
nm_assert ( network ) ;
msg = nlmsg_alloc_simple ( nlmsg_type , nlmsg_flags ) ;
if ( ! msg )
g_return_val_if_reached ( NULL ) ;
if ( nlmsg_append ( msg , & rtmsg , sizeof ( rtmsg ) , NLMSG_ALIGNTO ) < 0 )
goto nla_put_failure ;
addr_len = family = = AF_INET ? sizeof ( in_addr_t ) : sizeof ( struct in6_addr ) ;
2016-04-12 15:55:44 +02:00
nm_utils_ipx_address_clear_host_address ( family , & network_clean , network , plen ) ;
2015-10-20 09:27:16 +02:00
NLA_PUT ( msg , RTA_DST , addr_len , & network_clean ) ;
NLA_PUT_U32 ( msg , RTA_PRIORITY , metric ) ;
if ( pref_src )
NLA_PUT ( msg , RTA_PREFSRC , addr_len , pref_src ) ;
if ( mss > 0 ) {
struct nlattr * metrics ;
metrics = nla_nest_start ( msg , RTA_METRICS ) ;
if ( ! metrics )
goto nla_put_failure ;
NLA_PUT_U32 ( msg , RTAX_ADVMSS , mss ) ;
nla_nest_end ( msg , metrics ) ;
}
/* We currently don't have need for multi-hop routes... */
if ( gateway
& & memcmp ( gateway , & nm_ip_addr_zero , addr_len ) ! = 0 )
NLA_PUT ( msg , RTA_GATEWAY , addr_len , gateway ) ;
NLA_PUT_U32 ( msg , RTA_OIF , ifindex ) ;
return msg ;
nla_put_failure :
nlmsg_free ( msg ) ;
g_return_val_if_reached ( NULL ) ;
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-10-20 09:27:16 +02:00
/* tri-state: -1 = not yet detected, 0 = unsupported, 1 = supported. */
static int _support_kernel_extended_ifa_flags = -1;

#define _support_kernel_extended_ifa_flags_still_undecided() (G_UNLIKELY (_support_kernel_extended_ifa_flags == -1))
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
static void
_support_kernel_extended_ifa_flags_detect ( struct nl_msg * msg )
{
struct nlmsghdr * msg_hdr ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
if ( ! _support_kernel_extended_ifa_flags_still_undecided ( ) )
return ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
msg_hdr = nlmsg_hdr ( msg ) ;
if ( msg_hdr - > nlmsg_type ! = RTM_NEWADDR )
return ;
/* the extended address flags are only set for AF_INET6 */
if ( ( ( struct ifaddrmsg * ) nlmsg_data ( msg_hdr ) ) - > ifa_family ! = AF_INET6 )
return ;
/* see if the nl_msg contains the IFA_FLAGS attribute. If it does,
* we assume , that the kernel supports extended flags , IFA_F_MANAGETEMPADDR
* and IFA_F_NOPREFIXROUTE ( they were added together ) .
* */
2015-10-20 09:27:16 +02:00
_support_kernel_extended_ifa_flags = ! ! nlmsg_find_attr ( msg_hdr , sizeof ( struct ifaddrmsg ) , 8 /* IFA_FLAGS */ ) ;
_LOG2D ( " support: kernel-extended-ifa-flags: %ssupported " , _support_kernel_extended_ifa_flags ? " " : " not " ) ;
2015-04-14 23:14:06 +02:00
}
2015-10-12 16:07:01 +02:00
static gboolean
_support_kernel_extended_ifa_flags_get ( void )
2015-04-14 23:14:06 +02:00
{
2015-10-12 16:07:01 +02:00
if ( _support_kernel_extended_ifa_flags_still_undecided ( ) ) {
2016-02-04 16:31:01 +01:00
_LOG2W ( " support: kernel-extended-ifa-flags: unable to detect kernel support for handling IPv6 temporary addresses. Assume support " ) ;
_support_kernel_extended_ifa_flags = 1 ;
2015-10-12 16:07:01 +02:00
}
2015-10-20 09:27:16 +02:00
return _support_kernel_extended_ifa_flags ;
2015-10-12 16:07:01 +02:00
}
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
/******************************************************************
* NMPlatform types and functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2015-04-14 23:14:06 +02:00
2015-12-14 14:47:41 +01:00
typedef struct {
guint32 seq_number ;
WaitForNlResponseResult seq_result ;
2016-04-08 12:25:41 +02:00
gint64 timeout_abs_ns ;
2015-12-14 14:47:41 +01:00
WaitForNlResponseResult * out_seq_result ;
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
gint * out_refresh_all_in_progess ;
2015-12-14 14:47:41 +01:00
} DelayedActionWaitForNlResponseData ;
2016-09-29 13:49:01 +02:00
typedef struct {
2015-12-15 10:51:26 +01:00
struct nl_sock * nlh ;
2015-12-14 11:53:46 +01:00
guint32 nlh_seq_next ;
2016-04-07 21:16:51 +02:00
# ifdef NM_MORE_LOGGING
2015-12-14 14:47:41 +01:00
guint32 nlh_seq_last_handled ;
2016-04-07 21:16:51 +02:00
# endif
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
guint32 nlh_seq_last_seen ;
2015-10-12 16:07:01 +02:00
NMPCache * cache ;
GIOChannel * event_channel ;
guint event_id ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
gboolean sysctl_get_warned ;
GHashTable * sysctl_get_prev_values ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
GUdevClient * udev_client ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
struct {
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
/* which delayed actions are scheduled, as marked in @flags.
* Some types have additional arguments in the fields below . */
2015-10-12 16:07:01 +02:00
DelayedActionType flags ;
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
/* counter that a refresh all action is in progress, separated
* by type . */
gint refresh_all_in_progess [ _DELAYED_ACTION_IDX_REFRESH_ALL_NUM ] ;
2015-10-12 16:07:01 +02:00
GPtrArray * list_master_connected ;
GPtrArray * list_refresh_link ;
2015-12-14 14:47:41 +01:00
GArray * list_wait_for_nl_response ;
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
2015-10-12 16:07:01 +02:00
gint is_handling ;
} delayed_action ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
GHashTable * prune_candidates ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
GHashTable * wifi_data ;
2016-09-29 13:49:01 +02:00
} NMLinuxPlatformPrivate ;
struct _NMLinuxPlatform {
NMPlatform parent ;
NMLinuxPlatformPrivate _priv ;
2015-10-12 16:07:01 +02:00
} ;
2015-04-14 23:14:06 +02:00
2016-09-29 13:49:01 +02:00
struct _NMLinuxPlatformClass {
NMPlatformClass parent ;
} ;
G_DEFINE_TYPE ( NMLinuxPlatform , nm_linux_platform , NM_TYPE_PLATFORM )
2015-10-12 16:07:01 +02:00
static inline NMLinuxPlatformPrivate *
NM_LINUX_PLATFORM_GET_PRIVATE ( const void * self )
{
nm_assert ( NM_IS_LINUX_PLATFORM ( self ) ) ;
2015-06-22 13:22:48 +02:00
2016-09-29 13:49:01 +02:00
return & ( ( ( NMLinuxPlatform * ) self ) - > _priv ) ;
2015-04-14 23:14:06 +02:00
}
2016-04-07 12:14:53 +02:00
NMPlatform *
nm_linux_platform_new ( gboolean netns_support )
{
return g_object_new ( NM_TYPE_LINUX_PLATFORM ,
NM_PLATFORM_REGISTER_SINGLETON , FALSE ,
NM_PLATFORM_NETNS_SUPPORT , netns_support ,
NULL ) ;
}
2015-10-12 16:07:01 +02:00
void
nm_linux_platform_setup ( void )
{
g_object_new ( NM_TYPE_LINUX_PLATFORM ,
NM_PLATFORM_REGISTER_SINGLETON , TRUE ,
2016-04-07 12:14:53 +02:00
NM_PLATFORM_NETNS_SUPPORT , FALSE ,
2015-10-12 16:07:01 +02:00
NULL ) ;
}
2015-04-14 23:14:06 +02:00
2016-02-19 01:06:28 +01:00
static void
2016-12-09 10:11:29 +01:00
ASSERT_NETNS_CURRENT ( NMPlatform * platform )
2016-02-19 01:06:28 +01:00
{
nm_assert ( NM_IS_LINUX_PLATFORM ( platform ) ) ;
nm_assert ( NM_IN_SET ( nm_platform_netns_get ( platform ) , NULL , nmp_netns_get_current ( ) ) ) ;
}
2016-12-09 10:11:29 +01:00
/*****************************************************************************/
2016-12-08 14:29:00 +01:00
# define ASSERT_SYSCTL_ARGS(pathid, dirfd, path) \
G_STMT_START { \
const char * const _pathid = ( pathid ) ; \
const int _dirfd = ( dirfd ) ; \
const char * const _path = ( path ) ; \
\
nm_assert ( _path & & _path [ 0 ] ) ; \
g_assert ( ! strstr ( _path , " /../ " ) ) ; \
if ( _dirfd < 0 ) { \
nm_assert ( ! _pathid ) ; \
nm_assert ( _path [ 0 ] = = ' / ' ) ; \
nm_assert ( g_str_has_prefix ( _path , " /proc/sys/ " ) \
| | g_str_has_prefix ( _path , " /sys/ " ) ) ; \
} else { \
nm_assert ( _pathid & & _pathid [ 0 ] & & _pathid [ 0 ] ! = ' / ' ) ; \
nm_assert ( _path [ 0 ] ! = ' / ' ) ; \
} \
} G_STMT_END
2015-12-15 11:14:34 +01:00
static void
2016-12-08 15:12:52 +01:00
_log_dbg_sysctl_set_impl ( NMPlatform * platform , const char * pathid , int dirfd , const char * path , const char * value )
2015-12-15 11:14:34 +01:00
{
GError * error = NULL ;
char * contents , * contents_escaped ;
char * value_escaped = g_strescape ( value , NULL ) ;
2016-12-08 15:12:52 +01:00
if ( nm_utils_file_get_contents ( dirfd , path , 1 * 1024 * 1024 , & contents , NULL , & error ) < 0 ) {
_LOGD ( " sysctl: setting '%s' to '%s' (current value cannot be read: %s) " , pathid , value_escaped , error - > message ) ;
2015-12-15 11:14:34 +01:00
g_clear_error ( & error ) ;
} else {
g_strstrip ( contents ) ;
contents_escaped = g_strescape ( contents , NULL ) ;
if ( strcmp ( contents , value ) = = 0 )
2016-12-08 15:12:52 +01:00
_LOGD ( " sysctl: setting '%s' to '%s' (current value is identical) " , pathid , value_escaped ) ;
2015-12-15 11:14:34 +01:00
else
2016-12-08 15:12:52 +01:00
_LOGD ( " sysctl: setting '%s' to '%s' (current value is '%s') " , pathid , value_escaped , contents_escaped ) ;
2015-12-15 11:14:34 +01:00
g_free ( contents ) ;
g_free ( contents_escaped ) ;
}
g_free ( value_escaped ) ;
}
2016-12-08 15:12:52 +01:00
# define _log_dbg_sysctl_set(platform, pathid, dirfd, path, value) \
2015-12-15 11:14:34 +01:00
G_STMT_START { \
if ( _LOGD_ENABLED ( ) ) { \
2016-12-08 15:12:52 +01:00
_log_dbg_sysctl_set_impl ( platform , pathid , dirfd , path , value ) ; \
2015-12-15 11:14:34 +01:00
} \
} G_STMT_END
static gboolean
2016-12-08 14:29:00 +01:00
sysctl_set ( NMPlatform * platform , const char * pathid , int dirfd , const char * path , const char * value )
2015-12-15 11:14:34 +01:00
{
2016-02-19 01:06:28 +01:00
nm_auto_pop_netns NMPNetns * netns = NULL ;
2016-02-23 23:54:43 +01:00
int fd , tries ;
gssize nwrote ;
gsize len ;
2015-12-15 11:14:34 +01:00
char * actual ;
2016-02-23 23:54:43 +01:00
gs_free char * actual_free = NULL ;
2016-04-20 11:09:25 +02:00
int errsv ;
2015-12-15 11:14:34 +01:00
g_return_val_if_fail ( path ! = NULL , FALSE ) ;
g_return_val_if_fail ( value ! = NULL , FALSE ) ;
2016-12-08 14:29:00 +01:00
ASSERT_SYSCTL_ARGS ( pathid , dirfd , path ) ;
2015-12-15 11:14:34 +01:00
2016-12-08 15:12:52 +01:00
if ( dirfd < 0 ) {
if ( ! nm_platform_netns_push ( platform , & netns ) ) {
errno = ENETDOWN ;
return FALSE ;
}
2016-02-19 01:06:28 +01:00
2016-12-08 15:12:52 +01:00
pathid = path ;
fd = open ( path , O_WRONLY | O_TRUNC | O_CLOEXEC ) ;
if ( fd = = - 1 ) {
errsv = errno ;
if ( errsv = = ENOENT ) {
_LOGD ( " sysctl: failed to open '%s': (%d) %s " ,
pathid , errsv , strerror ( errsv ) ) ;
} else {
_LOGE ( " sysctl: failed to open '%s': (%d) %s " ,
pathid , errsv , strerror ( errsv ) ) ;
}
errno = errsv ;
return FALSE ;
}
} else {
fd = openat ( dirfd , path , O_WRONLY | O_TRUNC | O_CLOEXEC ) ;
if ( fd = = - 1 ) {
errsv = errno ;
if ( errsv = = ENOENT ) {
_LOGD ( " sysctl: failed to openat '%s': (%d) %s " ,
pathid , errsv , strerror ( errsv ) ) ;
} else {
_LOGE ( " sysctl: failed to openat '%s': (%d) %s " ,
pathid , errsv , strerror ( errsv ) ) ;
}
errno = errsv ;
return FALSE ;
2015-12-15 11:14:34 +01:00
}
}
2016-12-08 15:12:52 +01:00
_log_dbg_sysctl_set ( platform , pathid , dirfd , path , value ) ;
2015-12-15 11:14:34 +01:00
/* Most sysfs and sysctl options don't care about a trailing LF, while some
* ( like infiniband ) do . So always add the LF . Also , neither sysfs nor
* sysctl support partial writes so the LF must be added to the string we ' re
* about to write .
*/
2016-02-23 23:54:43 +01:00
len = strlen ( value ) + 1 ;
if ( len > 512 )
actual = actual_free = g_malloc ( len + 1 ) ;
else
actual = g_alloca ( len + 1 ) ;
memcpy ( actual , value , len - 1 ) ;
actual [ len - 1 ] = ' \n ' ;
actual [ len ] = ' \0 ' ;
2015-12-15 11:14:34 +01:00
/* Try to write the entire value three times if a partial write occurs */
2016-04-20 11:09:25 +02:00
errsv = 0 ;
2016-10-28 18:15:45 +00:00
for ( tries = 0 , nwrote = 0 ; tries < 3 & & nwrote < len - 1 ; tries + + ) {
2015-12-15 11:14:34 +01:00
nwrote = write ( fd , actual , len ) ;
if ( nwrote = = - 1 ) {
2016-04-20 11:09:25 +02:00
errsv = errno ;
if ( errsv = = EINTR ) {
2015-12-15 11:14:34 +01:00
_LOGD ( " sysctl: interrupted, will try again " ) ;
continue ;
}
break ;
}
}
2016-04-20 11:09:25 +02:00
if ( nwrote = = - 1 & & errsv ! = EEXIST ) {
2015-12-15 11:14:34 +01:00
_LOGE ( " sysctl: failed to set '%s' to '%s': (%d) %s " ,
2016-04-20 11:09:25 +02:00
path , value , errsv , strerror ( errsv ) ) ;
2016-10-28 18:15:45 +00:00
} else if ( nwrote < len - 1 ) {
2015-12-15 11:14:34 +01:00
_LOGE ( " sysctl: failed to set '%s' to '%s' after three attempts " ,
path , value ) ;
}
2016-10-28 18:15:45 +00:00
if ( nwrote < len - 1 ) {
2016-04-20 11:09:25 +02:00
if ( close ( fd ) ! = 0 ) {
if ( errsv ! = 0 )
errno = errsv ;
} else if ( errsv ! = 0 )
errno = errsv ;
else
errno = EIO ;
return FALSE ;
}
if ( close ( fd ) ! = 0 ) {
/* errno is already properly set. */
return FALSE ;
}
/* success. errno is undefined (no need to set). */
return TRUE ;
2015-12-15 11:14:34 +01:00
}
static GSList * sysctl_clear_cache_list ;
2016-03-01 10:17:44 +01:00
static void
_nm_logging_clear_platform_logging_cache_impl ( void )
2015-12-15 11:14:34 +01:00
{
while ( sysctl_clear_cache_list ) {
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( sysctl_clear_cache_list - > data ) ;
sysctl_clear_cache_list = g_slist_delete_link ( sysctl_clear_cache_list , sysctl_clear_cache_list ) ;
g_hash_table_destroy ( priv - > sysctl_get_prev_values ) ;
priv - > sysctl_get_prev_values = NULL ;
priv - > sysctl_get_warned = FALSE ;
}
}
static void
2016-12-08 15:12:52 +01:00
_log_dbg_sysctl_get_impl ( NMPlatform * platform , const char * pathid , const char * contents )
2015-12-15 11:14:34 +01:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
const char * prev_value = NULL ;
if ( ! priv - > sysctl_get_prev_values ) {
2016-03-01 10:17:44 +01:00
_nm_logging_clear_platform_logging_cache = _nm_logging_clear_platform_logging_cache_impl ;
2015-12-15 11:14:34 +01:00
sysctl_clear_cache_list = g_slist_prepend ( sysctl_clear_cache_list , platform ) ;
priv - > sysctl_get_prev_values = g_hash_table_new_full ( g_str_hash , g_str_equal , g_free , g_free ) ;
} else
2016-12-08 15:12:52 +01:00
prev_value = g_hash_table_lookup ( priv - > sysctl_get_prev_values , pathid ) ;
2015-12-15 11:14:34 +01:00
if ( prev_value ) {
if ( strcmp ( prev_value , contents ) ! = 0 ) {
char * contents_escaped = g_strescape ( contents , NULL ) ;
char * prev_value_escaped = g_strescape ( prev_value , NULL ) ;
2016-12-08 15:12:52 +01:00
_LOGD ( " sysctl: reading '%s': '%s' (changed from '%s' on last read) " , pathid , contents_escaped , prev_value_escaped ) ;
2015-12-15 11:14:34 +01:00
g_free ( contents_escaped ) ;
g_free ( prev_value_escaped ) ;
2016-12-08 15:12:52 +01:00
g_hash_table_insert ( priv - > sysctl_get_prev_values , g_strdup ( pathid ) , g_strdup ( contents ) ) ;
2015-12-15 11:14:34 +01:00
}
} else {
char * contents_escaped = g_strescape ( contents , NULL ) ;
2016-12-08 15:12:52 +01:00
_LOGD ( " sysctl: reading '%s': '%s' " , pathid , contents_escaped ) ;
2015-12-15 11:14:34 +01:00
g_free ( contents_escaped ) ;
2016-12-08 15:12:52 +01:00
g_hash_table_insert ( priv - > sysctl_get_prev_values , g_strdup ( pathid ) , g_strdup ( contents ) ) ;
2015-12-15 11:14:34 +01:00
}
if ( ! priv - > sysctl_get_warned
& & g_hash_table_size ( priv - > sysctl_get_prev_values ) > 50000 ) {
_LOGW ( " sysctl: the internal cache for debug-logging of sysctl values grew pretty large. You can clear it by disabling debug-logging: `nmcli general logging level KEEP domains PLATFORM:INFO`. " ) ;
priv - > sysctl_get_warned = TRUE ;
}
}
2016-12-08 15:12:52 +01:00
# define _log_dbg_sysctl_get(platform, pathid, contents) \
2015-12-15 11:14:34 +01:00
G_STMT_START { \
if ( _LOGD_ENABLED ( ) ) \
2016-12-08 15:12:52 +01:00
_log_dbg_sysctl_get_impl ( platform , pathid , contents ) ; \
2015-12-15 11:14:34 +01:00
} G_STMT_END
static char *
2016-12-08 14:29:00 +01:00
sysctl_get ( NMPlatform * platform , const char * pathid , int dirfd , const char * path )
2015-12-15 11:14:34 +01:00
{
2016-02-19 01:06:28 +01:00
nm_auto_pop_netns NMPNetns * netns = NULL ;
2015-12-15 11:14:34 +01:00
GError * error = NULL ;
char * contents ;
2016-12-08 14:29:00 +01:00
ASSERT_SYSCTL_ARGS ( pathid , dirfd , path ) ;
2015-12-15 11:14:34 +01:00
2016-12-08 15:12:52 +01:00
if ( dirfd < 0 ) {
if ( ! nm_platform_netns_push ( platform , & netns ) )
return NULL ;
pathid = path ;
}
2016-02-19 01:06:28 +01:00
2016-12-08 15:12:52 +01:00
if ( nm_utils_file_get_contents ( dirfd , path , 1 * 1024 * 1024 , & contents , NULL , & error ) < 0 ) {
2015-12-15 11:14:34 +01:00
/* We assume FAILED means EOPNOTSUP */
if ( g_error_matches ( error , G_FILE_ERROR , G_FILE_ERROR_NOENT )
2016-08-25 13:11:21 +02:00
| | g_error_matches ( error , G_FILE_ERROR , G_FILE_ERROR_NODEV )
2015-12-15 11:14:34 +01:00
| | g_error_matches ( error , G_FILE_ERROR , G_FILE_ERROR_FAILED ) )
2016-12-08 15:12:52 +01:00
_LOGD ( " error reading %s: %s " , pathid , error - > message ) ;
2015-12-15 11:14:34 +01:00
else
2016-12-08 15:12:52 +01:00
_LOGE ( " error reading %s: %s " , pathid , error - > message ) ;
2015-12-15 11:14:34 +01:00
g_clear_error ( & error ) ;
return NULL ;
}
g_strstrip ( contents ) ;
2016-12-08 15:12:52 +01:00
_log_dbg_sysctl_get ( platform , pathid , contents ) ;
2015-12-15 11:14:34 +01:00
return contents ;
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-12-15 11:14:34 +01:00
2015-10-12 16:07:01 +02:00
static gboolean
check_support_kernel_extended_ifa_flags ( NMPlatform * platform )
{
g_return_val_if_fail ( NM_IS_LINUX_PLATFORM ( platform ) , FALSE ) ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
return _support_kernel_extended_ifa_flags_get ( ) ;
}
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
static gboolean
check_support_user_ipv6ll ( NMPlatform * platform )
{
g_return_val_if_fail ( NM_IS_LINUX_PLATFORM ( platform ) , FALSE ) ;
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
return _support_user_ipv6ll_get ( ) ;
}
2015-04-14 23:14:06 +02:00
2015-10-12 16:07:01 +02:00
static void
process_events ( NMPlatform * platform )
{
delayed_action_handle_all ( platform , TRUE ) ;
2015-04-14 23:14:06 +02:00
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2013-03-27 22:23:24 +01:00
2015-10-12 16:07:01 +02:00
# define cache_lookup_all_objects(type, platform, obj_type, visible_only) \
2016-12-01 10:56:09 +01:00
( { \
NMPCacheId _cache_id ; \
\
( ( const type * const * ) nmp_cache_lookup_multi ( NM_LINUX_PLATFORM_GET_PRIVATE ( ( platform ) ) - > cache , \
nmp_cache_id_init_object_type ( & _cache_id , ( obj_type ) , ( visible_only ) ) , \
NULL ) ) ; \
} )
2015-10-12 16:07:01 +02:00
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-10-12 16:07:01 +02:00
2015-04-06 18:29:36 +02:00
static void
2015-11-27 12:27:18 +01:00
do_emit_signal ( NMPlatform * platform , const NMPObject * obj , NMPCacheOpsType cache_op , gboolean was_visible )
2015-04-06 18:29:36 +02:00
{
gboolean is_visible ;
2015-06-15 18:47:04 +02:00
NMPObject obj_clone ;
const NMPClass * klass ;
2015-04-06 18:29:36 +02:00
nm_assert ( NM_IN_SET ( ( NMPlatformSignalChangeType ) cache_op , ( NMPlatformSignalChangeType ) NMP_CACHE_OPS_UNCHANGED , NM_PLATFORM_SIGNAL_ADDED , NM_PLATFORM_SIGNAL_CHANGED , NM_PLATFORM_SIGNAL_REMOVED ) ) ;
nm_assert ( obj | | cache_op = = NMP_CACHE_OPS_UNCHANGED ) ;
nm_assert ( ! obj | | cache_op = = NMP_CACHE_OPS_REMOVED | | obj = = nmp_cache_lookup_obj ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache , obj ) ) ;
nm_assert ( ! obj | | cache_op ! = NMP_CACHE_OPS_REMOVED | | obj ! = nmp_cache_lookup_obj ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache , obj ) ) ;
2016-12-09 10:11:29 +01:00
ASSERT_NETNS_CURRENT ( platform ) ;
2016-02-19 01:06:28 +01:00
2015-04-06 18:29:36 +02:00
switch ( cache_op ) {
case NMP_CACHE_OPS_ADDED :
if ( ! nmp_object_is_visible ( obj ) )
return ;
break ;
case NMP_CACHE_OPS_UPDATED :
is_visible = nmp_object_is_visible ( obj ) ;
if ( ! was_visible & & is_visible )
cache_op = NMP_CACHE_OPS_ADDED ;
else if ( was_visible & & ! is_visible ) {
/* This is a bit ugly. The object was visible and changed in a way that it became invisible.
* We raise a removed signal , but contrary to a real ' remove ' , @ obj is already changed to be
* different from what it was when the user saw it the last time .
*
* The more correct solution would be to have cache_pre_hook ( ) create a clone of the original
* value before it was changed to become invisible .
*
* But , don ' t bother . Probably nobody depends on the original values and only cares about the
* id properties ( which are still correct ) .
*/
cache_op = NMP_CACHE_OPS_REMOVED ;
} else if ( ! is_visible )
return ;
break ;
case NMP_CACHE_OPS_REMOVED :
if ( ! was_visible )
return ;
break ;
default :
g_assert ( cache_op = = NMP_CACHE_OPS_UNCHANGED ) ;
return ;
}
2015-06-15 18:47:04 +02:00
klass = NMP_OBJECT_GET_CLASS ( obj ) ;
2015-11-27 12:27:18 +01:00
_LOGt ( " emit signal %s %s: %s " ,
2015-06-15 18:47:04 +02:00
klass - > signal_type ,
2015-04-06 18:29:36 +02:00
nm_platform_signal_change_type_to_string ( ( NMPlatformSignalChangeType ) cache_op ) ,
2015-11-27 12:27:18 +01:00
nmp_object_to_string ( obj , NMP_OBJECT_TO_STRING_PUBLIC , NULL , 0 ) ) ;
2015-04-06 18:29:36 +02:00
2015-06-15 18:47:04 +02:00
/* don't expose @obj directly, but clone the public fields. A signal handler might
* call back into NMPlatform which could invalidate ( or modify ) @ obj . */
memcpy ( & obj_clone . object , & obj - > object , klass - > sizeof_public ) ;
2015-11-27 12:54:31 +01:00
g_signal_emit ( platform ,
_nm_platform_signal_id_get ( klass - > signal_type_id ) ,
0 ,
2016-10-22 13:08:36 +02:00
( int ) klass - > obj_type ,
2015-11-27 12:54:31 +01:00
obj_clone . object . ifindex ,
& obj_clone . object ,
2016-10-22 13:08:36 +02:00
( int ) cache_op ) ;
2015-04-06 18:29:36 +02:00
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-04-06 18:29:36 +02:00
2016-04-08 12:40:35 +02:00
_NM_UTILS_LOOKUP_DEFINE ( static , delayed_action_refresh_from_object_type , NMPObjectType , DelayedActionType ,
NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT ( DELAYED_ACTION_TYPE_NONE ) ,
NM_UTILS_LOOKUP_ITEM ( NMP_OBJECT_TYPE_LINK , DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS ) ,
NM_UTILS_LOOKUP_ITEM ( NMP_OBJECT_TYPE_IP4_ADDRESS , DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES ) ,
NM_UTILS_LOOKUP_ITEM ( NMP_OBJECT_TYPE_IP6_ADDRESS , DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES ) ,
NM_UTILS_LOOKUP_ITEM ( NMP_OBJECT_TYPE_IP4_ROUTE , DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES ) ,
NM_UTILS_LOOKUP_ITEM ( NMP_OBJECT_TYPE_IP6_ROUTE , DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES ) ,
NM_UTILS_LOOKUP_ITEM_IGNORE_OTHER ( ) ,
) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2016-04-08 12:40:35 +02:00
_NM_UTILS_LOOKUP_DEFINE ( static , delayed_action_refresh_to_object_type , DelayedActionType , NMPObjectType ,
NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT ( NMP_OBJECT_TYPE_UNKNOWN ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS , NMP_OBJECT_TYPE_LINK ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES , NMP_OBJECT_TYPE_IP4_ADDRESS ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES , NMP_OBJECT_TYPE_IP6_ADDRESS ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES , NMP_OBJECT_TYPE_IP4_ROUTE ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES , NMP_OBJECT_TYPE_IP6_ROUTE ) ,
NM_UTILS_LOOKUP_ITEM_IGNORE_OTHER ( ) ,
) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that multiple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle of processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
_NM_UTILS_LOOKUP_DEFINE ( static , delayed_action_refresh_all_to_idx , DelayedActionType , guint ,
NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT ( 0 ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS , DELAYED_ACTION_IDX_REFRESH_ALL_LINKS ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES , DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ADDRESSES ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES , DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ADDRESSES ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES , DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ROUTES ) ,
NM_UTILS_LOOKUP_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES , DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ROUTES ) ,
NM_UTILS_LOOKUP_ITEM_IGNORE_OTHER ( ) ,
) ;
2016-04-08 12:40:35 +02:00
NM_UTILS_LOOKUP_STR_DEFINE_STATIC ( delayed_action_to_string , DelayedActionType ,
NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT ( " unknown " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS , " refresh-all-links " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES , " refresh-all-ip4-addresses " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES , " refresh-all-ip6-addresses " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES , " refresh-all-ip4-routes " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES , " refresh-all-ip6-routes " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_REFRESH_LINK , " refresh-link " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_MASTER_CONNECTED , " master-connected " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_READ_NETLINK , " read-netlink " ) ,
NM_UTILS_LOOKUP_STR_ITEM ( DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE , " wait-for-nl-response " ) ,
NM_UTILS_LOOKUP_ITEM_IGNORE ( DELAYED_ACTION_TYPE_NONE ) ,
NM_UTILS_LOOKUP_ITEM_IGNORE ( DELAYED_ACTION_TYPE_REFRESH_ALL ) ,
NM_UTILS_LOOKUP_ITEM_IGNORE ( __DELAYED_ACTION_TYPE_MAX ) ,
) ;
2015-04-06 18:29:36 +02:00
2015-12-14 12:09:50 +01:00
static const char *
delayed_action_to_string_full ( DelayedActionType action_type , gpointer user_data , char * buf , gsize buf_size )
{
char * buf0 = buf ;
2015-12-14 14:47:41 +01:00
const DelayedActionWaitForNlResponseData * data ;
2015-12-14 12:09:50 +01:00
nm_utils_strbuf_append_str ( & buf , & buf_size , delayed_action_to_string ( action_type ) ) ;
switch ( action_type ) {
case DELAYED_ACTION_TYPE_MASTER_CONNECTED :
nm_utils_strbuf_append ( & buf , & buf_size , " (master-ifindex %d) " , GPOINTER_TO_INT ( user_data ) ) ;
break ;
case DELAYED_ACTION_TYPE_REFRESH_LINK :
nm_utils_strbuf_append ( & buf , & buf_size , " (ifindex %d) " , GPOINTER_TO_INT ( user_data ) ) ;
break ;
2015-12-14 14:47:41 +01:00
case DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE :
data = user_data ;
if ( data ) {
gint64 timeout = data - > timeout_abs_ns - nm_utils_get_monotonic_timestamp_ns ( ) ;
char b [ 255 ] ;
2015-12-23 12:35:12 +01:00
nm_utils_strbuf_append ( & buf , & buf_size , " (seq %u, timeout in %s% " G_GINT64_FORMAT " .%09 " G_GINT64_FORMAT " %s%s) " ,
2015-12-14 14:47:41 +01:00
data - > seq_number ,
timeout < 0 ? " - " : " " ,
( timeout < 0 ? - timeout : timeout ) / NM_UTILS_NS_PER_SECOND ,
( timeout < 0 ? - timeout : timeout ) % NM_UTILS_NS_PER_SECOND ,
data - > seq_result ? " , " : " " ,
data - > seq_result ? wait_for_nl_response_to_string ( data - > seq_result , b , sizeof ( b ) ) : " " ) ;
} else
nm_utils_strbuf_append_str ( & buf , & buf_size , " (any) " ) ;
break ;
2015-12-14 12:09:50 +01:00
default :
nm_assert ( ! user_data ) ;
break ;
}
return buf0 ;
}
# define _LOGt_delayed_action(action_type, user_data, operation) \
G_STMT_START { \
char _buf [ 255 ] ; \
\
_LOGt ( " delayed-action: %s %s " , \
" " operation , \
delayed_action_to_string_full ( action_type , user_data , _buf , sizeof ( _buf ) ) ) ; \
} G_STMT_END
2015-04-06 18:29:36 +02:00
2015-12-14 14:47:41 +01:00
/*****************************************************************************/
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that multiple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle of processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
static gboolean
delayed_action_refresh_all_in_progress ( NMPlatform * platform , DelayedActionType action_type )
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
nm_assert ( nm_utils_is_power_of_two ( action_type ) ) ;
nm_assert ( NM_FLAGS_ANY ( action_type , DELAYED_ACTION_TYPE_REFRESH_ALL ) ) ;
nm_assert ( ! NM_FLAGS_ANY ( action_type , ~ DELAYED_ACTION_TYPE_REFRESH_ALL ) ) ;
if ( NM_FLAGS_ANY ( priv - > delayed_action . flags , action_type ) )
return TRUE ;
if ( priv - > delayed_action . refresh_all_in_progess [ delayed_action_refresh_all_to_idx ( action_type ) ] > 0 )
return TRUE ;
return FALSE ;
}
2015-12-14 14:47:41 +01:00
static void
delayed_action_wait_for_nl_response_complete ( NMPlatform * platform ,
guint idx ,
WaitForNlResponseResult seq_result )
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
DelayedActionWaitForNlResponseData * data ;
nm_assert ( NM_FLAGS_HAS ( priv - > delayed_action . flags , DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ) ) ;
nm_assert ( idx < priv - > delayed_action . list_wait_for_nl_response - > len ) ;
nm_assert ( seq_result ) ;
data = & g_array_index ( priv - > delayed_action . list_wait_for_nl_response , DelayedActionWaitForNlResponseData , idx ) ;
_LOGt_delayed_action ( DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE , data , " complete " ) ;
2016-04-08 12:25:41 +02:00
if ( priv - > delayed_action . list_wait_for_nl_response - > len < = 1 )
2015-12-14 14:47:41 +01:00
priv - > delayed_action . flags & = ~ DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ;
2016-04-08 12:25:41 +02:00
if ( data - > out_seq_result )
* data - > out_seq_result = seq_result ;
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
if ( data - > out_refresh_all_in_progess ) {
nm_assert ( * data - > out_refresh_all_in_progess > 0 ) ;
* data - > out_refresh_all_in_progess - = 1 ;
}
2015-12-14 14:47:41 +01:00
2016-04-08 12:25:41 +02:00
g_array_remove_index_fast ( priv - > delayed_action . list_wait_for_nl_response , idx ) ;
2015-12-14 14:47:41 +01:00
}
static void
delayed_action_wait_for_nl_response_complete_all ( NMPlatform * platform ,
2016-01-24 18:46:14 +01:00
WaitForNlResponseResult fallback_result )
2015-12-14 14:47:41 +01:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
if ( NM_FLAGS_HAS ( priv - > delayed_action . flags , DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ) ) {
2016-01-24 18:46:14 +01:00
while ( priv - > delayed_action . list_wait_for_nl_response - > len > 0 ) {
const DelayedActionWaitForNlResponseData * data ;
guint idx = priv - > delayed_action . list_wait_for_nl_response - > len - 1 ;
WaitForNlResponseResult r ;
data = & g_array_index ( priv - > delayed_action . list_wait_for_nl_response , DelayedActionWaitForNlResponseData , idx ) ;
/* prefer the result that we already have. */
r = data - > seq_result ? : fallback_result ;
delayed_action_wait_for_nl_response_complete ( platform , idx , r ) ;
}
2015-12-14 14:47:41 +01:00
}
nm_assert ( ! NM_FLAGS_HAS ( priv - > delayed_action . flags , DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ) ) ;
nm_assert ( priv - > delayed_action . list_wait_for_nl_response - > len = = 0 ) ;
}
/*****************************************************************************/
2015-04-06 18:29:36 +02:00
static void
delayed_action_handle_MASTER_CONNECTED ( NMPlatform * platform , int master_ifindex )
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-10-20 14:43:31 +02:00
nm_auto_nmpobj NMPObject * obj_cache = NULL ;
2015-04-06 18:29:36 +02:00
gboolean was_visible ;
NMPCacheOpsType cache_op ;
cache_op = nmp_cache_update_link_master_connected ( priv - > cache , master_ifindex , & obj_cache , & was_visible , cache_pre_hook , platform ) ;
2015-11-27 12:27:18 +01:00
do_emit_signal ( platform , obj_cache , cache_op , was_visible ) ;
2015-04-06 18:29:36 +02:00
}
static void
delayed_action_handle_REFRESH_LINK ( NMPlatform * platform , int ifindex )
{
2015-12-14 14:47:41 +01:00
do_request_link_no_delayed_actions ( platform , ifindex , NULL ) ;
2015-04-06 18:29:36 +02:00
}
static void
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_REFRESH_ALL ( NMPlatform * platform , DelayedActionType flags )
2015-04-06 18:29:36 +02:00
{
2015-12-14 14:47:41 +01:00
do_request_all_no_delayed_actions ( platform , flags ) ;
2015-04-06 18:29:36 +02:00
}
static void
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_READ_NETLINK ( NMPlatform * platform )
2015-12-14 14:47:41 +01:00
{
2015-12-15 10:40:41 +01:00
event_handler_read_netlink ( platform , FALSE ) ;
2015-12-14 14:47:41 +01:00
}
/* Handle waiting for outstanding netlink responses: read the netlink
 * socket with wait_for_acks=TRUE so that pending sequence numbers get a
 * chance to complete.
 *
 * Defect fixed: whitespace-mangled tokens and stray date-stamp lines;
 * reconstructed the original one-call implementation. */
static void
delayed_action_handle_WAIT_FOR_NL_RESPONSE (NMPlatform *platform)
{
	event_handler_read_netlink (platform, TRUE);
}
/* Pop and handle exactly one pending delayed action, in priority order:
 * MASTER_CONNECTED first (pure cache fixup, must happen right away),
 * then READ_NETLINK (the kernel-side socket buffer is limited, so drain
 * it early), then REFRESH_ALL, REFRESH_LINK and finally
 * WAIT_FOR_NL_RESPONSE.
 *
 * Returns: TRUE if an action was handled (call again), FALSE if no
 * delayed action is pending.
 *
 * Defect fixed: the function was riddled with interleaved VCS commit-log
 * text and mangled tokens; reconstructed from the surviving code lines
 * in their original order. */
static gboolean
delayed_action_handle_one (NMPlatform *platform)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	gpointer user_data;

	if (priv->delayed_action.flags == DELAYED_ACTION_TYPE_NONE)
		return FALSE;

	/* First process DELAYED_ACTION_TYPE_MASTER_CONNECTED actions.
	 * This type of action is entirely cache-internal and is here to resolve a
	 * cache inconsistency. It should be fixed right away. */
	if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_MASTER_CONNECTED)) {
		nm_assert (priv->delayed_action.list_master_connected->len > 0);

		/* Take the first pending ifindex; remove-fast is fine because the
		 * list is de-duplicated and order among masters does not matter. */
		user_data = priv->delayed_action.list_master_connected->pdata[0];
		g_ptr_array_remove_index_fast (priv->delayed_action.list_master_connected, 0);
		if (priv->delayed_action.list_master_connected->len == 0)
			priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_MASTER_CONNECTED;
		nm_assert (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_master_connected->pdata, priv->delayed_action.list_master_connected->len, user_data) < 0);

		_LOGt_delayed_action (DELAYED_ACTION_TYPE_MASTER_CONNECTED, user_data, "handle");
		delayed_action_handle_MASTER_CONNECTED (platform, GPOINTER_TO_INT (user_data));
		return TRUE;
	}
	nm_assert (priv->delayed_action.list_master_connected->len == 0);

	/* Next we prefer read-netlink, because the buffer size is limited and we want to process events
	 * from netlink early. */
	if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_READ_NETLINK)) {
		_LOGt_delayed_action (DELAYED_ACTION_TYPE_READ_NETLINK, NULL, "handle");
		priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_READ_NETLINK;
		delayed_action_handle_READ_NETLINK (platform);
		return TRUE;
	}

	if (NM_FLAGS_ANY (priv->delayed_action.flags, DELAYED_ACTION_TYPE_REFRESH_ALL)) {
		DelayedActionType flags, iflags;

		/* Clear all refresh-all bits up front; the dump we are about to
		 * request covers everything scheduled so far. */
		flags = priv->delayed_action.flags & DELAYED_ACTION_TYPE_REFRESH_ALL;
		priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_ALL;

		if (_LOGt_ENABLED ()) {
			FOR_EACH_DELAYED_ACTION (iflags, flags) {
				_LOGt_delayed_action (iflags, NULL, "handle");
			}
		}

		delayed_action_handle_REFRESH_ALL (platform, flags);
		return TRUE;
	}

	if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_REFRESH_LINK)) {
		nm_assert (priv->delayed_action.list_refresh_link->len > 0);

		user_data = priv->delayed_action.list_refresh_link->pdata[0];
		g_ptr_array_remove_index_fast (priv->delayed_action.list_refresh_link, 0);
		if (priv->delayed_action.list_refresh_link->len == 0)
			priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_LINK;
		nm_assert (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_refresh_link->pdata, priv->delayed_action.list_refresh_link->len, user_data) < 0);

		_LOGt_delayed_action (DELAYED_ACTION_TYPE_REFRESH_LINK, user_data, "handle");

		delayed_action_handle_REFRESH_LINK (platform, GPOINTER_TO_INT (user_data));

		return TRUE;
	}

	if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE)) {
		nm_assert (priv->delayed_action.list_wait_for_nl_response->len > 0);
		_LOGt_delayed_action (DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE, NULL, "handle");
		delayed_action_handle_WAIT_FOR_NL_RESPONSE (platform);
		return TRUE;
	}

	return FALSE;
}
static gboolean
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_all ( NMPlatform * platform , gboolean read_netlink )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
gboolean any = FALSE ;
2015-12-17 18:24:57 +01:00
g_return_val_if_fail ( priv - > delayed_action . is_handling = = 0 , FALSE ) ;
2015-04-06 18:29:36 +02:00
priv - > delayed_action . is_handling + + ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( read_netlink )
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_READ_NETLINK , NULL ) ;
2015-04-06 18:29:36 +02:00
while ( delayed_action_handle_one ( platform ) )
any = TRUE ;
priv - > delayed_action . is_handling - - ;
2015-12-14 14:47:41 +01:00
2015-12-17 18:24:57 +01:00
cache_prune_candidates_prune ( platform ) ;
2015-12-14 14:47:41 +01:00
2015-04-06 18:29:36 +02:00
return any ;
}
/* Schedule one (or, for flag-only types, several) delayed action(s).
 *
 * @action_type: the DELAYED_ACTION_TYPE_* flag(s) to schedule.
 * @user_data: per-type payload: an ifindex (as GPOINTER_TO_INT) for
 *   REFRESH_LINK/MASTER_CONNECTED, a DelayedActionWaitForNlResponseData*
 *   for WAIT_FOR_NL_RESPONSE, and NULL otherwise.
 *
 * REFRESH_LINK and MASTER_CONNECTED lists are kept duplicate-free;
 * WAIT_FOR_NL_RESPONSE entries are always appended (one per request).
 *
 * Defect fixed: interleaved VCS commit-log text and mangled tokens made
 * the function uncompilable; reconstructed the original logic. */
static void
delayed_action_schedule (NMPlatform *platform, DelayedActionType action_type, gpointer user_data)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	DelayedActionType iflags;

	nm_assert (action_type != DELAYED_ACTION_TYPE_NONE);

	switch (action_type) {
	case DELAYED_ACTION_TYPE_REFRESH_LINK:
		/* de-duplicate: only add the ifindex if not already queued. */
		if (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_refresh_link->pdata, priv->delayed_action.list_refresh_link->len, user_data) < 0)
			g_ptr_array_add (priv->delayed_action.list_refresh_link, user_data);
		break;
	case DELAYED_ACTION_TYPE_MASTER_CONNECTED:
		if (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_master_connected->pdata, priv->delayed_action.list_master_connected->len, user_data) < 0)
			g_ptr_array_add (priv->delayed_action.list_master_connected, user_data);
		break;
	case DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE:
		/* user_data points to a DelayedActionWaitForNlResponseData that is
		 * copied into the array. */
		g_array_append_vals (priv->delayed_action.list_wait_for_nl_response, user_data, 1);
		break;
	default:
		/* all remaining types are pure flags without payload and may be
		 * combined -- but never with the list-backed types above. */
		nm_assert (!user_data);
		nm_assert (!NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_REFRESH_LINK));
		nm_assert (!NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_MASTER_CONNECTED));
		nm_assert (!NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE));
		break;
	}

	priv->delayed_action.flags |= action_type;

	if (_LOGt_ENABLED ()) {
		FOR_EACH_DELAYED_ACTION (iflags, action_type) {
			_LOGt_delayed_action (iflags, user_data, "schedule");
		}
	}
}
2015-12-14 14:47:41 +01:00
static void
delayed_action_schedule_WAIT_FOR_NL_RESPONSE ( NMPlatform * platform ,
guint32 seq_number ,
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
WaitForNlResponseResult * out_seq_result ,
gint * out_refresh_all_in_progess )
2015-12-14 14:47:41 +01:00
{
DelayedActionWaitForNlResponseData data = {
. seq_number = seq_number ,
. timeout_abs_ns = nm_utils_get_monotonic_timestamp_ns ( ) + ( 200 * ( NM_UTILS_NS_PER_SECOND / 1000 ) ) ,
. out_seq_result = out_seq_result ,
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
. out_refresh_all_in_progess = out_refresh_all_in_progess ,
2015-12-14 14:47:41 +01:00
} ;
delayed_action_schedule ( platform ,
DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ,
& data ) ;
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static void
2015-06-19 16:24:18 +02:00
cache_prune_candidates_record_all ( NMPlatform * platform , NMPObjectType obj_type )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2016-12-01 10:56:09 +01:00
NMPCacheId cache_id ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > prune_candidates = nmp_cache_lookup_all_to_hash ( priv - > cache ,
2016-12-01 10:56:09 +01:00
nmp_cache_id_init_object_type ( & cache_id , obj_type , FALSE ) ,
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > prune_candidates ) ;
2015-11-06 12:58:55 +01:00
_LOGt ( " cache-prune: record %s (now %u candidates) " , nmp_class_from_type ( obj_type ) - > obj_type_name ,
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > prune_candidates ? g_hash_table_size ( priv - > prune_candidates ) : 0 ) ;
}
static void
cache_prune_candidates_record_one (NMPlatform *platform, NMPObject *obj)
{
	/* Record a single object as a prune candidate. A NULL @obj is a no-op.
	 *
	 * The candidate set holds its own reference on @obj (released via
	 * nmp_object_unref() when the hash table entry is dropped). */
	NMLinuxPlatformPrivate *priv;

	if (!obj)
		return;

	priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);

	if (!priv->prune_candidates)
		priv->prune_candidates = g_hash_table_new_full (NULL, NULL, (GDestroyNotify) nmp_object_unref, NULL);

	/* Only log when the object is actually new to the set, to avoid
	 * duplicate trace lines for repeated recordings. */
	if (_LOGt_ENABLED () && !g_hash_table_contains (priv->prune_candidates, obj))
		_LOGt ("cache-prune: record-one: %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ALL, NULL, 0));
	g_hash_table_add (priv->prune_candidates, nmp_object_ref (obj));
}
static void
cache_prune_candidates_drop (NMPlatform *platform, const NMPObject *obj)
{
	/* Remove @obj from the prune-candidate set (if present), because it was
	 * seen again and is therefore still alive. A NULL @obj is a no-op.
	 *
	 * Removing the entry releases the reference taken when the candidate was
	 * recorded (the hash table's key-destroy function is nmp_object_unref). */
	NMLinuxPlatformPrivate *priv;

	if (!obj)
		return;

	priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	if (priv->prune_candidates) {
		if (_LOGt_ENABLED () && g_hash_table_contains (priv->prune_candidates, obj))
			_LOGt ("cache-prune: drop-one: %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ALL, NULL, 0));
		g_hash_table_remove (priv->prune_candidates, obj);
	}
}
static void
cache_prune_candidates_prune (NMPlatform *platform)
{
	/* Evict every remaining prune candidate from the platform cache and emit
	 * the corresponding removal signals.
	 *
	 * Candidates that survived the dump were already dropped via
	 * cache_prune_candidates_drop(); whatever is left was not re-announced by
	 * kernel and is considered gone. */
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	GHashTable *prune_candidates;
	GHashTableIter iter;
	const NMPObject *obj;
	gboolean was_visible;
	NMPCacheOpsType cache_op;

	if (!priv->prune_candidates)
		return;

	/* Detach the set from priv first: cache_pre_hook()/do_emit_signal() may
	 * re-enter and start recording a fresh candidate set while we iterate. */
	prune_candidates = priv->prune_candidates;
	priv->prune_candidates = NULL;

	g_hash_table_iter_init (&iter, prune_candidates);
	while (g_hash_table_iter_next (&iter, (gpointer *) &obj, NULL)) {
		/* nm_auto_nmpobj unrefs obj_cache when it goes out of scope. */
		nm_auto_nmpobj NMPObject *obj_cache = NULL;

		_LOGt ("cache-prune: prune %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ALL, NULL, 0));

		cache_op = nmp_cache_remove (priv->cache, obj, TRUE, &obj_cache, &was_visible, cache_pre_hook, platform);
		do_emit_signal (platform, obj_cache, cache_op, was_visible);
	}

	/* Unreffing the table releases the per-entry object references too. */
	g_hash_table_unref (prune_candidates);
}
2015-04-06 18:29:36 +02:00
static void
cache_pre_hook ( NMPCache * cache , const NMPObject * old , const NMPObject * new , NMPCacheOpsType ops_type , gpointer user_data )
{
2016-04-10 11:21:50 +02:00
NMPlatform * platform = user_data ;
2015-04-06 18:29:36 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
const NMPClass * klass ;
2015-10-27 10:25:22 +01:00
char str_buf [ sizeof ( _nm_utils_to_string_buffer ) ] ;
char str_buf2 [ sizeof ( _nm_utils_to_string_buffer ) ] ;
2015-04-06 18:29:36 +02:00
nm_assert ( old | | new ) ;
nm_assert ( NM_IN_SET ( ops_type , NMP_CACHE_OPS_ADDED , NMP_CACHE_OPS_REMOVED , NMP_CACHE_OPS_UPDATED ) ) ;
nm_assert ( ops_type ! = NMP_CACHE_OPS_ADDED | | ( old = = NULL & & NMP_OBJECT_IS_VALID ( new ) & & nmp_object_is_alive ( new ) ) ) ;
nm_assert ( ops_type ! = NMP_CACHE_OPS_REMOVED | | ( new = = NULL & & NMP_OBJECT_IS_VALID ( old ) & & nmp_object_is_alive ( old ) ) ) ;
nm_assert ( ops_type ! = NMP_CACHE_OPS_UPDATED | | ( NMP_OBJECT_IS_VALID ( old ) & & nmp_object_is_alive ( old ) & & NMP_OBJECT_IS_VALID ( new ) & & nmp_object_is_alive ( new ) ) ) ;
nm_assert ( new = = NULL | | old = = NULL | | nmp_object_id_equal ( new , old ) ) ;
2016-04-10 11:21:50 +02:00
nm_assert ( ! old | | ! new | | NMP_OBJECT_GET_CLASS ( old ) = = NMP_OBJECT_GET_CLASS ( new ) ) ;
2015-04-06 18:29:36 +02:00
klass = old ? NMP_OBJECT_GET_CLASS ( old ) : NMP_OBJECT_GET_CLASS ( new ) ;
nm_assert ( klass = = ( new ? NMP_OBJECT_GET_CLASS ( new ) : NMP_OBJECT_GET_CLASS ( old ) ) ) ;
2015-11-06 12:58:55 +01:00
_LOGt ( " update-cache-%s: %s: %s%s%s " ,
2015-04-06 18:29:36 +02:00
klass - > obj_type_name ,
( ops_type = = NMP_CACHE_OPS_UPDATED
? " UPDATE "
: ( ops_type = = NMP_CACHE_OPS_REMOVED
? " REMOVE "
: ( ops_type = = NMP_CACHE_OPS_ADDED ) ? " ADD " : " ??? " ) ) ,
( ops_type ! = NMP_CACHE_OPS_ADDED
? nmp_object_to_string ( old , NMP_OBJECT_TO_STRING_ALL , str_buf2 , sizeof ( str_buf2 ) )
: nmp_object_to_string ( new , NMP_OBJECT_TO_STRING_ALL , str_buf2 , sizeof ( str_buf2 ) ) ) ,
( ops_type = = NMP_CACHE_OPS_UPDATED ) ? " -> " : " " ,
( ops_type = = NMP_CACHE_OPS_UPDATED
? nmp_object_to_string ( new , NMP_OBJECT_TO_STRING_ALL , str_buf , sizeof ( str_buf ) )
: " " ) ) ;
switch ( klass - > obj_type ) {
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_LINK :
2015-04-06 18:29:36 +02:00
{
/* check whether changing a slave link can cause a master link (bridge or bond) to go up/down */
if ( old
& & nmp_cache_link_connected_needs_toggle_by_ifindex ( priv - > cache , old - > link . master , new , old ) )
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_MASTER_CONNECTED , GINT_TO_POINTER ( old - > link . master ) ) ;
if ( new
& & ( ! old | | old - > link . master ! = new - > link . master )
& & nmp_cache_link_connected_needs_toggle_by_ifindex ( priv - > cache , new - > link . master , new , old ) )
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_MASTER_CONNECTED , GINT_TO_POINTER ( new - > link . master ) ) ;
}
{
/* check whether we are about to change a master link that needs toggling connected state. */
2015-06-25 17:49:09 +02:00
if ( new /* <-- nonsensical, make coverity happy */
& & nmp_cache_link_connected_needs_toggle ( cache , new , new , old ) )
2015-04-06 18:29:36 +02:00
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_MASTER_CONNECTED , GINT_TO_POINTER ( new - > link . ifindex ) ) ;
}
{
int ifindex = 0 ;
/* if we remove a link (from netlink), we must refresh the addresses and routes */
2015-06-25 17:49:09 +02:00
if ( ops_type = = NMP_CACHE_OPS_REMOVED
& & old /* <-- nonsensical, make coverity happy */ )
2015-04-06 18:29:36 +02:00
ifindex = old - > link . ifindex ;
else if ( ops_type = = NMP_CACHE_OPS_UPDATED
2015-06-25 17:49:09 +02:00
& & old & & new /* <-- nonsensical, make coverity happy */
2015-04-06 18:29:36 +02:00
& & ! new - > _link . netlink . is_in_netlink
& & new - > _link . netlink . is_in_netlink ! = old - > _link . netlink . is_in_netlink )
ifindex = new - > link . ifindex ;
if ( ifindex > 0 ) {
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_schedule ( platform ,
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES ,
NULL ) ;
2015-04-06 18:29:36 +02:00
}
}
2015-09-14 15:05:00 +02:00
{
int ifindex = - 1 ;
/* removal of a link could be caused by moving the link to another netns.
* In this case , we potentially have to update other links that have this link as parent .
2015-11-26 12:02:29 +01:00
* Currently , kernel misses to sent us a notification in this case
* ( https : //bugzilla.redhat.com/show_bug.cgi?id=1262908). */
2015-09-14 15:05:00 +02:00
if ( ops_type = = NMP_CACHE_OPS_REMOVED
& & old /* <-- nonsensical, make coverity happy */
& & old - > _link . netlink . is_in_netlink )
ifindex = old - > link . ifindex ;
else if ( ops_type = = NMP_CACHE_OPS_UPDATED
& & old & & new /* <-- nonsensical, make coverity happy */
& & old - > _link . netlink . is_in_netlink
& & ! new - > _link . netlink . is_in_netlink )
ifindex = new - > link . ifindex ;
if ( ifindex > 0 ) {
const NMPlatformLink * const * links ;
links = cache_lookup_all_objects ( NMPlatformLink , platform , NMP_OBJECT_TYPE_LINK , FALSE ) ;
if ( links ) {
for ( ; * links ; links + + ) {
const NMPlatformLink * l = ( * links ) ;
if ( l - > parent = = ifindex )
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_REFRESH_LINK , GINT_TO_POINTER ( l - > ifindex ) ) ;
}
}
}
}
2015-04-06 18:29:36 +02:00
{
/* if a link goes down, we must refresh routes */
if ( ops_type = = NMP_CACHE_OPS_UPDATED
2015-06-25 17:49:09 +02:00
& & old & & new /* <-- nonsensical, make coverity happy */
2015-04-06 18:29:36 +02:00
& & old - > _link . netlink . is_in_netlink
& & new - > _link . netlink . is_in_netlink
2016-04-15 20:53:15 +02:00
& & ( ( NM_FLAGS_HAS ( old - > link . n_ifi_flags , IFF_UP )
& & ! NM_FLAGS_HAS ( new - > link . n_ifi_flags , IFF_UP ) )
| | ( NM_FLAGS_HAS ( old - > link . n_ifi_flags , IFF_LOWER_UP )
& & ! NM_FLAGS_HAS ( new - > link . n_ifi_flags , IFF_LOWER_UP ) ) ) ) {
/* FIXME: I suspect that IFF_LOWER_UP must not be considered, and I
* think kernel does send RTM_DELROUTE events for IPv6 routes , so
* we might not need to refresh IPv6 routes . */
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_schedule ( platform ,
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES ,
NULL ) ;
}
2015-04-06 18:29:36 +02:00
}
2015-11-26 15:48:32 +01:00
if ( NM_IN_SET ( ops_type , NMP_CACHE_OPS_ADDED , NMP_CACHE_OPS_UPDATED )
& & ( new & & new - > _link . netlink . is_in_netlink )
& & ( ! old | | ! old - > _link . netlink . is_in_netlink ) )
{
if ( ! new - > _link . netlink . lnk ) {
/* certain link-types also come with a IFLA_INFO_DATA/lnk_data. It may happen that
* kernel didn ' t send this notification , thus when we first learn about a link
* that lacks an lnk_data we re - request it again .
*
* For example https : //bugzilla.redhat.com/show_bug.cgi?id=1284001 */
switch ( new - > link . type ) {
case NM_LINK_TYPE_GRE :
2015-11-27 22:22:25 +01:00
case NM_LINK_TYPE_IP6TNL :
2015-11-26 15:48:32 +01:00
case NM_LINK_TYPE_INFINIBAND :
case NM_LINK_TYPE_MACVLAN :
2015-12-04 09:49:39 +01:00
case NM_LINK_TYPE_MACVTAP :
2015-11-11 18:41:48 +01:00
case NM_LINK_TYPE_SIT :
2015-11-26 15:48:32 +01:00
case NM_LINK_TYPE_VLAN :
case NM_LINK_TYPE_VXLAN :
delayed_action_schedule ( platform ,
DELAYED_ACTION_TYPE_REFRESH_LINK ,
GINT_TO_POINTER ( new - > link . ifindex ) ) ;
break ;
default :
break ;
}
}
2015-11-26 12:02:29 +01:00
if ( new - > link . type = = NM_LINK_TYPE_VETH
& & new - > link . parent = = 0 ) {
/* the initial notification when adding a veth pair can lack the parent/IFLA_LINK
* ( https : //bugzilla.redhat.com/show_bug.cgi?id=1285827).
* Request it again . */
delayed_action_schedule ( platform ,
DELAYED_ACTION_TYPE_REFRESH_LINK ,
GINT_TO_POINTER ( new - > link . ifindex ) ) ;
}
2016-01-26 21:32:07 +01:00
if ( new - > link . type = = NM_LINK_TYPE_ETHERNET
& & new - > link . addr . len = = 0 ) {
/* Due to a kernel bug, we sometimes receive spurious NEWLINK
* messages after a wifi interface has disappeared . Since the
* link is not present anymore we can ' t determine its type and
* thus it will show up as a Ethernet one , with no address
* specified . Request the link again to check if it really
* exists . https : //bugzilla.redhat.com/show_bug.cgi?id=1302037
*/
delayed_action_schedule ( platform ,
DELAYED_ACTION_TYPE_REFRESH_LINK ,
GINT_TO_POINTER ( new - > link . ifindex ) ) ;
}
2015-11-26 15:48:32 +01:00
}
2015-04-06 18:29:36 +02:00
{
/* on enslave/release, we also refresh the master. */
int ifindex1 = 0 , ifindex2 = 0 ;
gboolean changed_master , changed_connected ;
changed_master = ( new & & new - > _link . netlink . is_in_netlink & & new - > link . master > 0 ? new - > link . master : 0 )
! = ( old & & old - > _link . netlink . is_in_netlink & & old - > link . master > 0 ? old - > link . master : 0 ) ;
2016-02-29 15:52:27 +01:00
changed_connected = ( new & & new - > _link . netlink . is_in_netlink ? NM_FLAGS_HAS ( new - > link . n_ifi_flags , IFF_LOWER_UP ) : 2 )
! = ( old & & old - > _link . netlink . is_in_netlink ? NM_FLAGS_HAS ( old - > link . n_ifi_flags , IFF_LOWER_UP ) : 2 ) ;
2015-04-06 18:29:36 +02:00
if ( changed_master | | changed_connected ) {
ifindex1 = ( old & & old - > _link . netlink . is_in_netlink & & old - > link . master > 0 ) ? old - > link . master : 0 ;
ifindex2 = ( new & & new - > _link . netlink . is_in_netlink & & new - > link . master > 0 ) ? new - > link . master : 0 ;
if ( ifindex1 > 0 )
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_REFRESH_LINK , GINT_TO_POINTER ( ifindex1 ) ) ;
if ( ifindex2 > 0 & & ifindex1 ! = ifindex2 )
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_REFRESH_LINK , GINT_TO_POINTER ( ifindex2 ) ) ;
}
platform: cope differently with spurious RTM_DELLINK message when unslaving bridge-slave
Unslaving from a bridge causes a wrong RTM_DELLINK event for
the former slave.
# ip link add dummy0 type dummy
# ip link add bridge0 type bridge
# ip link set bridge0 up
# ip link set dummy0 master bridge0
# ip monitor link &
# ip link set dummy0 nomaster
18: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop master bridge0 state DOWN group default
link/ether 76:44:5f:b9:38:02 brd ff:ff:ff:ff:ff:ff
18: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether 76:44:5f:b9:38:02
Deleted 18: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether 76:44:5f:b9:38:02
18: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether 76:44:5f:b9:38:02 brd ff:ff:ff:ff:ff:ff
19: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
19: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
Previously, during do_request_link() we would remember the link that is
about to be requested (delayed_deletion) and delay processing a new
RTM_DELLINK message until the end of do_request_link() -- and possibly
forget about the deletion, if RTM_DELLINK was followed by a
RTM_NEWLINK.
However, this hack does not catch the case where an external command
unslaves the link.
Instead just accept the wrong event and raise a "removed" signal right
away. This brings the cache in an externally visible, wrong state that
will be fixed by a following "added" signal.
Still do that because working around the kernel bug is complicated. Also,
we already might emit wrong "added" signals for devices that are already
removed. As a consequence, a user should not consider the platform signals
until all events are processed.
Listeners to that signal should accept that added/removed link changes
can be wrong and should preferably handle them idly, when the events
have settled.
It can even be worse, that a RTM_DELLINK is not fixed by a following
RTM_NEWLINK:
...
# ip link set dummy0 nomaster
36: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop master bridge0 state DOWN
link/ether e2:f2:20:98:3a:be brd ff:ff:ff:ff:ff:ff
36: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether e2:f2:20:98:3a:be
Deleted 36: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether e2:f2:20:98:3a:be
37: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
37: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
So, when a slave is deleted, we have to refetch it too.
https://bugzilla.redhat.com/show_bug.cgi?id=1285719
2015-11-27 11:20:58 +01:00
}
{
if ( ( ( ops_type = = NMP_CACHE_OPS_REMOVED )
| | ( ( ops_type = = NMP_CACHE_OPS_UPDATED )
& & new
& & ! new - > _link . netlink . is_in_netlink ) )
& & old
& & old - > _link . netlink . is_in_netlink
& & old - > link . master ) {
/* sometimes we receive a wrong RTM_DELLINK message when unslaving
* a device . Refetch the link again to check whether the device
* is really gone .
*
* https : //bugzilla.redhat.com/show_bug.cgi?id=1285719#c2 */
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_REFRESH_LINK , GINT_TO_POINTER ( old - > link . ifindex ) ) ;
}
2015-04-06 18:29:36 +02:00
}
break ;
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_IP4_ADDRESS :
case NMP_OBJECT_TYPE_IP6_ADDRESS :
2015-04-06 18:29:36 +02:00
{
/* Address deletion is sometimes accompanied by route deletion. We need to
* check all routes belonging to the same interface . */
if ( ops_type = = NMP_CACHE_OPS_REMOVED ) {
delayed_action_schedule ( platform ,
2015-06-19 16:24:18 +02:00
( klass - > obj_type = = NMP_OBJECT_TYPE_IP4_ADDRESS )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
? DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES
: DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES ,
NULL ) ;
2015-04-06 18:29:36 +02:00
}
}
2016-04-10 11:21:50 +02:00
break ;
2015-04-06 18:29:36 +02:00
default :
break ;
}
}
2016-04-10 11:21:50 +02:00
/* cache_post:
 * @platform: the platform instance
 * @msghdr: the netlink message header that triggered this cache operation
 * @cache_op: how the cache changed (add/update/remove)
 * @obj: the object parsed from the netlink message
 * @obj_cache: the corresponding object inside the cache, if any
 *
 * Post-processing hook invoked after a netlink message was merged into the
 * platform cache. Handles a kernel quirk with in-place route replacement. */
static void
cache_post (NMPlatform *platform,
            struct nlmsghdr *msghdr,
            NMPCacheOpsType cache_op,
            NMPObject *obj,
            NMPObject *obj_cache)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	DelayedActionType refresh_all_type;

	nm_assert (NMP_OBJECT_IS_VALID (obj));
	nm_assert (!obj_cache || nmp_object_id_equal (obj, obj_cache));

	if (msghdr->nlmsg_type != RTM_NEWROUTE)
		return;

	refresh_all_type = (NMP_OBJECT_GET_TYPE (obj) == NMP_OBJECT_TYPE_IP4_ROUTE)
	                   ? DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES
	                   : DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES;

	/* Via `ip route change` the user can update an existing route, which
	 * effectively creates a new object (with a different ID) replacing the
	 * old one. Kernel however does not send a RTM_DELROUTE for the replaced
	 * route, so the cache would keep a stale entry.
	 *
	 * Hack around that: if after this update the cache holds another route
	 * for the same network/plen,metric destination, we cannot do better than
	 * re-requesting all routes of this address family.
	 *
	 * Mitigation to avoid needless dumps: skip scheduling if a dump for this
	 * type is already in progress -- that dump delivers the full state anyway. */
	if (   !delayed_action_refresh_all_in_progress (platform, refresh_all_type)
	    && nmp_cache_find_other_route_for_same_destination (priv->cache, obj))
		delayed_action_schedule (platform, refresh_all_type, NULL);
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-04-25 14:53:39 +02:00
2015-12-11 18:20:54 +01:00
static int
2015-12-14 14:47:41 +01:00
_nl_send_auto_with_seq ( NMPlatform * platform ,
struct nl_msg * nlmsg ,
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
WaitForNlResponseResult * out_seq_result ,
gint * out_refresh_all_in_progess )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-12-11 18:20:54 +01:00
guint32 seq ;
int nle ;
2015-04-06 18:29:36 +02:00
2015-12-14 11:53:46 +01:00
/* complete the message with a sequence number (ensuring it's not zero). */
seq = priv - > nlh_seq_next + + ? : priv - > nlh_seq_next + + ;
2015-12-11 18:20:54 +01:00
nlmsg_hdr ( nlmsg ) - > nlmsg_seq = seq ;
2015-12-15 10:51:26 +01:00
nle = nl_send_auto ( priv - > nlh , nlmsg ) ;
2015-04-06 18:29:36 +02:00
2016-01-04 18:15:06 +01:00
if ( nle > = 0 ) {
nle = 0 ;
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
delayed_action_schedule_WAIT_FOR_NL_RESPONSE ( platform , seq , out_seq_result , out_refresh_all_in_progess ) ;
2016-01-04 18:15:06 +01:00
} else
2015-12-15 10:55:27 +01:00
_LOGD ( " netlink: send: failed sending message: %s (%d) " , nl_geterror ( nle ) , nle ) ;
2015-12-11 18:20:54 +01:00
return nle ;
2015-04-06 18:29:36 +02:00
}
static void
2015-12-14 14:47:41 +01:00
do_request_link_no_delayed_actions ( NMPlatform * platform , int ifindex , const char * name )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
2015-04-06 18:29:36 +02:00
2015-10-20 09:27:16 +02:00
if ( name & & ! name [ 0 ] )
name = NULL ;
g_return_if_fail ( ifindex > 0 | | name ) ;
_LOGD ( " do-request-link: %d %s " , ifindex , name ? name : " " ) ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( ifindex > 0 ) {
cache_prune_candidates_record_one ( platform ,
( NMPObject * ) nmp_cache_lookup_link ( priv - > cache , ifindex ) ) ;
2015-04-06 18:29:36 +02:00
}
2015-12-15 10:40:41 +01:00
event_handler_read_netlink ( platform , FALSE ) ;
2015-04-06 18:29:36 +02:00
2015-10-20 09:27:16 +02:00
nlmsg = _nl_msg_new_link ( RTM_GETLINK ,
0 ,
ifindex ,
name ,
2015-11-02 14:27:22 +01:00
0 ,
2015-10-20 09:27:16 +02:00
0 ) ;
2015-12-11 18:20:54 +01:00
if ( nlmsg )
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
_nl_send_auto_with_seq ( platform , nlmsg , NULL , NULL ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
}
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
/* do_request_link:
 * @platform: the platform instance
 * @ifindex: interface index to request, or 0 to look up by @name only
 * @name: (allow-none): interface name
 *
 * Request a single link from kernel and process the result: sends the
 * RTM_GETLINK request, then handles all delayed actions so that the
 * response is merged into the platform cache before returning. */
static void
do_request_link (NMPlatform *platform, int ifindex, const char *name)
{
	do_request_link_no_delayed_actions (platform, ifindex, name);
	delayed_action_handle_all (platform, FALSE);
}
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static void
2015-12-14 14:47:41 +01:00
do_request_all_no_delayed_actions ( NMPlatform * platform , DelayedActionType action_type )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
DelayedActionType iflags ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
nm_assert ( ! NM_FLAGS_ANY ( action_type , ~ DELAYED_ACTION_TYPE_REFRESH_ALL ) ) ;
action_type & = DELAYED_ACTION_TYPE_REFRESH_ALL ;
2015-04-06 18:29:36 +02:00
2016-04-07 17:14:03 +02:00
FOR_EACH_DELAYED_ACTION ( iflags , action_type ) {
cache_prune_candidates_record_all ( platform , delayed_action_refresh_to_object_type ( iflags ) ) ;
}
2015-04-06 18:29:36 +02:00
2016-04-07 17:14:03 +02:00
FOR_EACH_DELAYED_ACTION ( iflags , action_type ) {
NMPObjectType obj_type = delayed_action_refresh_to_object_type ( iflags ) ;
const NMPClass * klass = nmp_class_from_type ( obj_type ) ;
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
struct rtgenmsg gmsg = {
. rtgen_family = klass - > addr_family ,
} ;
int nle ;
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
gint * out_refresh_all_in_progess ;
out_refresh_all_in_progess = & priv - > delayed_action . refresh_all_in_progess [ delayed_action_refresh_all_to_idx ( iflags ) ] ;
nm_assert ( * out_refresh_all_in_progess > = 0 ) ;
* out_refresh_all_in_progess + = 1 ;
2015-04-06 18:29:36 +02:00
2016-04-07 17:14:03 +02:00
/* clear any delayed action that request a refresh of this object type. */
priv - > delayed_action . flags & = ~ iflags ;
_LOGt_delayed_action ( iflags , NULL , " handle (do-request-all) " ) ;
if ( obj_type = = NMP_OBJECT_TYPE_LINK ) {
priv - > delayed_action . flags & = ~ DELAYED_ACTION_TYPE_REFRESH_LINK ;
g_ptr_array_set_size ( priv - > delayed_action . list_refresh_link , 0 ) ;
_LOGt_delayed_action ( DELAYED_ACTION_TYPE_REFRESH_LINK , NULL , " clear (do-request-all) " ) ;
}
2015-10-20 09:27:16 +02:00
2016-04-07 17:14:03 +02:00
event_handler_read_netlink ( platform , FALSE ) ;
2015-10-20 09:27:16 +02:00
2016-04-07 17:14:03 +02:00
/* reimplement
* nl_rtgen_request ( sk , klass - > rtm_gettype , klass - > addr_family , NLM_F_DUMP ) ;
* because we need the sequence number .
*/
nlmsg = nlmsg_alloc_simple ( klass - > rtm_gettype , NLM_F_DUMP ) ;
if ( ! nlmsg )
continue ;
nle = nlmsg_append ( nlmsg , & gmsg , sizeof ( gmsg ) , NLMSG_ALIGNTO ) ;
if ( nle < 0 )
continue ;
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
if ( _nl_send_auto_with_seq ( platform , nlmsg , NULL , out_refresh_all_in_progess ) < 0 ) {
nm_assert ( * out_refresh_all_in_progess > 0 ) ;
* out_refresh_all_in_progess - = 1 ;
}
2015-04-06 18:29:36 +02:00
}
2015-12-14 14:47:41 +01:00
}
2015-04-06 18:29:36 +02:00
2015-12-14 14:47:41 +01:00
/* do_request_one_type:
 * @platform: the platform instance
 * @obj_type: the NMPObjectType whose objects should be re-fetched from kernel
 *
 * Synchronously refresh all objects of one type: request a dump for the
 * corresponding refresh-all action, then handle all delayed actions so the
 * results are merged into the cache before returning. */
static void
do_request_one_type (NMPlatform *platform, NMPObjectType obj_type)
{
	do_request_all_no_delayed_actions (platform, delayed_action_refresh_from_object_type (obj_type));
	delayed_action_handle_all (platform, FALSE);
}
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that multiple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle of processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
/* event_seq_check_refresh_all:
 * @platform: the platform instance
 * @seq_number: the sequence number of the netlink message just received
 *
 * All messages belonging to one NLM_F_DUMP response carry the same sequence
 * number. Hence, when the observed sequence number changes, the dump that was
 * running under the previous number (nlh_seq_last_seen) is complete: find its
 * wait-entry and decrement the associated refresh-all in-progress counter. */
static void
event_seq_check_refresh_all (NMPlatform *platform, guint32 seq_number)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	guint i;

	/* nothing to do for unsolicited messages (seq 0) or while the same
	 * response is still streaming in. */
	if (NM_IN_SET (seq_number, 0, priv->nlh_seq_last_seen))
		return;

	if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE)) {
		nm_assert (priv->delayed_action.list_wait_for_nl_response->len > 0);

		for (i = 0; i < priv->delayed_action.list_wait_for_nl_response->len; i++) {
			DelayedActionWaitForNlResponseData *data;

			data = &g_array_index (priv->delayed_action.list_wait_for_nl_response, DelayedActionWaitForNlResponseData, i);
			if (data->seq_number != priv->nlh_seq_last_seen)
				continue;
			if (data->out_refresh_all_in_progess) {
				/* the previous dump is over: release its in-progress marker. */
				nm_assert (*data->out_refresh_all_in_progess > 0);
				*data->out_refresh_all_in_progess -= 1;
				data->out_refresh_all_in_progess = NULL;
				break;
			}
		}
	}

	priv->nlh_seq_last_seen = seq_number;
}
2015-12-13 10:03:22 +01:00
static void
2016-04-07 21:19:45 +02:00
event_seq_check ( NMPlatform * platform , guint32 seq_number , WaitForNlResponseResult seq_result )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-12-14 14:47:41 +01:00
DelayedActionWaitForNlResponseData * data ;
guint i ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-14 14:47:41 +01:00
if ( seq_number = = 0 )
2015-12-13 10:03:22 +01:00
return ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-14 14:47:41 +01:00
if ( NM_FLAGS_HAS ( priv - > delayed_action . flags , DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ) ) {
nm_assert ( priv - > delayed_action . list_wait_for_nl_response - > len > 0 ) ;
for ( i = 0 ; i < priv - > delayed_action . list_wait_for_nl_response - > len ; i + + ) {
data = & g_array_index ( priv - > delayed_action . list_wait_for_nl_response , DelayedActionWaitForNlResponseData , i ) ;
if ( data - > seq_number = = seq_number ) {
/* We potentially receive many parts partial responses for the same sequence number.
* Thus , we only remember the result , and collect it later . */
if ( data - > seq_result < 0 ) {
/* we already saw an error for this seqence number.
* Preserve it . */
} else if ( seq_result ! = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN
| | data - > seq_result = = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN )
data - > seq_result = seq_result ;
return ;
}
}
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2016-04-07 21:16:51 +02:00
# ifdef NM_MORE_LOGGING
2015-12-14 14:47:41 +01:00
if ( seq_number ! = priv - > nlh_seq_last_handled )
_LOGt ( " netlink: recvmsg: unwaited sequence number %u " , seq_number ) ;
priv - > nlh_seq_last_handled = seq_number ;
2016-04-07 21:16:51 +02:00
# endif
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
}
2015-12-13 10:37:40 +01:00
static void
2016-01-24 18:46:14 +01:00
event_valid_msg ( NMPlatform * platform , struct nl_msg * msg , gboolean handle_events )
2013-03-27 22:23:24 +01:00
{
2015-12-13 10:37:40 +01:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-10-20 14:43:31 +02:00
nm_auto_nmpobj NMPObject * obj = NULL ;
2015-10-12 16:07:01 +02:00
nm_auto_nmpobj NMPObject * obj_cache = NULL ;
2015-12-12 22:11:33 +01:00
NMPCacheOpsType cache_op ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
struct nlmsghdr * msghdr ;
2015-05-29 11:12:15 +02:00
char buf_nlmsg_type [ 16 ] ;
2015-10-12 16:07:01 +02:00
gboolean id_only = FALSE ;
2015-12-12 22:11:33 +01:00
gboolean was_visible ;
2013-03-27 22:23:24 +01:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
msghdr = nlmsg_hdr ( msg ) ;
2014-01-07 17:21:12 +01:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( _support_kernel_extended_ifa_flags_still_undecided ( ) & & msghdr - > nlmsg_type = = RTM_NEWADDR )
2015-05-06 11:55:02 +02:00
_support_kernel_extended_ifa_flags_detect ( msg ) ;
2014-01-07 17:21:12 +01:00
2016-01-24 18:46:14 +01:00
if ( ! handle_events )
return ;
2015-10-12 16:07:01 +02:00
if ( NM_IN_SET ( msghdr - > nlmsg_type , RTM_DELLINK , RTM_DELADDR , RTM_DELROUTE ) ) {
/* The event notifies about a deleted object. We don't need to initialize all
* fields of the object . */
id_only = TRUE ;
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-10-12 16:07:01 +02:00
obj = nmp_object_new_from_nl ( platform , priv - > cache , msg , id_only ) ;
if ( ! obj ) {
2015-11-06 12:58:55 +01:00
_LOGT ( " event-notification: %s, seq %u: ignore " ,
2015-08-05 18:20:00 +02:00
_nl_nlmsg_type_to_str ( msghdr - > nlmsg_type , buf_nlmsg_type , sizeof ( buf_nlmsg_type ) ) ,
2015-10-12 16:07:01 +02:00
msghdr - > nlmsg_seq ) ;
2015-12-13 10:37:40 +01:00
return ;
2015-08-05 18:20:00 +02:00
}
platform: avoid storing unknown netlink object types (bgo #742928)
Testing WWAN connections through a Nokia Series 40 phone, addresses of family
AF_PHONET end up triggering an assert() in object_has_ifindex(), just because
object_type_from_nl_object() only handles AF_INET and AF_INET6 address.
In order to avoid this kind of problems, we'll try to make sure that the object
caches kept by NM only store known object types.
(fixup by dcbw to use cached passed to cache_remove_unknown())
https://bugzilla.gnome.org/show_bug.cgi?id=742928
Connect: ppp0 <--> /dev/ttyACM0
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 5 / phase 'establish'
NetworkManager[27434]: <info> (ppp0): new Generic device (driver: 'unknown' ifindex: 12)
NetworkManager[27434]: <info> (ppp0): exported as /org/freedesktop/NetworkManager/Devices/4
[Thread 0x7ffff1ecf700 (LWP 27439) exited]
NetworkManager[27434]: <info> (ttyACM0): device state change: ip-config -> deactivating (reason 'user-requested') [70 110 39]
Terminating on signal 15
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 10 / phase 'terminate'
**
NetworkManager:ERROR:platform/nm-linux-platform.c:1534:object_has_ifindex: code should not be reached
Program received signal SIGABRT, Aborted.
0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
(gdb) bt
#0 0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
#1 0x00007ffff4693e6a in abort () from /usr/lib/libc.so.6
#2 0x00007ffff4c8d7f5 in g_assertion_message () from /usr/lib/libglib-2.0.so.0
#3 0x00007ffff4c8d88a in g_assertion_message_expr () from /usr/lib/libglib-2.0.so.0
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
#5 0x0000000000472bec in check_cache_items (platform=0x7fe8a0, cache=0x7fda30, ifindex=12) at platform/nm-linux-platform.c:1549
#6 0x0000000000472de3 in announce_object (platform=0x7fe8a0, object=0x8a8c30, change_type=NM_PLATFORM_SIGNAL_REMOVED, reason=NM_PLATFORM_REASON_EXTERNAL) at platform/nm-linux-platform.c:1617
#7 0x0000000000473dd2 in event_notification (msg=0x8a7970, user_data=0x7fe8a0) at platform/nm-linux-platform.c:1992
#8 0x00007ffff5ee14de in nl_recvmsgs_report () from /usr/lib/libnl-3.so.200
#9 0x00007ffff5ee1849 in nl_recvmsgs () from /usr/lib/libnl-3.so.200
#10 0x00000000004794df in event_handler (channel=0x7fc930, io_condition=G_IO_IN, user_data=0x7fe8a0) at platform/nm-linux-platform.c:4152
#11 0x00007ffff4c6791d in g_main_context_dispatch () from /usr/lib/libglib-2.0.so.0
#12 0x00007ffff4c67cf8 in ?? () from /usr/lib/libglib-2.0.so.0
#13 0x00007ffff4c68022 in g_main_loop_run () from /usr/lib/libglib-2.0.so.0
#14 0x00000000004477ee in main (argc=1, argv=0x7fffffffeaa8) at main.c:447
(gdb) fr 4
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
1534 g_assert_not_reached ();
2015-01-15 09:18:07 +01:00
2015-11-06 12:58:55 +01:00
_LOGT ( " event-notification: %s, seq %u: %s " ,
2015-10-12 16:07:01 +02:00
_nl_nlmsg_type_to_str ( msghdr - > nlmsg_type , buf_nlmsg_type , sizeof ( buf_nlmsg_type ) ) ,
msghdr - > nlmsg_seq , nmp_object_to_string ( obj ,
id_only ? NMP_OBJECT_TO_STRING_ID : NMP_OBJECT_TO_STRING_PUBLIC , NULL , 0 ) ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-10-12 16:07:01 +02:00
switch ( msghdr - > nlmsg_type ) {
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-10-12 16:07:01 +02:00
case RTM_NEWLINK :
case RTM_NEWADDR :
case RTM_NEWROUTE :
2016-08-10 11:54:30 +02:00
case RTM_GETLINK :
2015-12-12 22:11:33 +01:00
cache_op = nmp_cache_update_netlink ( priv - > cache , obj , & obj_cache , & was_visible , cache_pre_hook , platform ) ;
2016-04-10 11:21:50 +02:00
cache_post ( platform , msghdr , cache_op , obj , obj_cache ) ;
2015-12-12 22:11:33 +01:00
do_emit_signal ( platform , obj_cache , cache_op , was_visible ) ;
2015-10-12 16:07:01 +02:00
break ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-10-12 16:07:01 +02:00
case RTM_DELLINK :
case RTM_DELADDR :
case RTM_DELROUTE :
2015-12-12 22:11:33 +01:00
cache_op = nmp_cache_remove_netlink ( priv - > cache , obj , & obj_cache , & was_visible , cache_pre_hook , platform ) ;
do_emit_signal ( platform , obj_cache , cache_op , was_visible ) ;
2015-10-12 16:07:01 +02:00
break ;
2014-09-29 17:58:44 +02:00
2015-10-12 16:07:01 +02:00
default :
break ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
}
2013-03-27 22:23:24 +01:00
2015-10-12 16:07:01 +02:00
cache_prune_candidates_drop ( platform , obj_cache ) ;
2013-03-27 22:23:24 +01:00
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2013-03-27 22:23:24 +01:00
2015-04-06 18:29:36 +02:00
static const NMPObject *
cache_lookup_link ( NMPlatform * platform , int ifindex )
{
const NMPObject * obj_cache ;
obj_cache = nmp_cache_lookup_link ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache , ifindex ) ;
platform: drop nm_platform_get_error()
For NMPlatform instances we had an error reporting mechanism
which stores the last error reason in a private field. Later we
would check it via nm_platform_get_error().
Remove this. It was not used much, and it is not a great way
to report errors.
One problem is that at the point where the error happens, you don't
know whether anybody cares about an error code. So, you add code to set
the error reason because somebody *might* need it (but in realitiy, almost
no caller cares).
Also, we tested this functionality which is hardly used in non-testing code.
While this was a burden to maintain in the tests, it was likely still buggy
because there were no real use-cases, beside the tests.
Then, sometimes platform functions call each other which might overwrite the
error reason. So, every function must be cautious to preserve/set
the error reason according to it's own meaning. This can involve storing
the error code, calling another function, and restoring it afterwards.
This is harder to get right compared to a "return-error-code" pattern, where
every function manages its error code independently.
It is better to return the error reason whenever due. For that we already
have our common glib patterns
(1) gboolean fcn (...);
(2) gboolean fcn (..., GError **error);
In few cases, we need more details then a #gboolean, but don't want
to bother constructing a #GError. Then we should do instead:
(3) NMPlatformError fcn (...);
2015-06-15 17:58:36 +02:00
if ( ! nmp_object_is_visible ( obj_cache ) )
2015-04-06 18:29:36 +02:00
return NULL ;
return obj_cache ;
}
2016-04-13 19:17:11 +02:00
const NMPlatformObject * const *
nm_linux_platform_lookup ( NMPlatform * platform , const NMPCacheId * cache_id , guint * out_len )
{
g_return_val_if_fail ( NM_IS_LINUX_PLATFORM ( platform ) , NULL ) ;
g_return_val_if_fail ( cache_id , NULL ) ;
return nmp_cache_lookup_multi ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache ,
cache_id , out_len ) ;
}
2013-03-27 22:23:24 +01:00
static GArray *
link_get_all ( NMPlatform * platform )
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2016-12-01 10:56:09 +01:00
NMPCacheId cache_id ;
2013-03-27 22:23:24 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return nmp_cache_lookup_multi_to_array ( priv - > cache ,
2015-06-19 16:24:18 +02:00
NMP_OBJECT_TYPE_LINK ,
2016-12-01 10:56:09 +01:00
nmp_cache_id_init_object_type ( & cache_id , NMP_OBJECT_TYPE_LINK , TRUE ) ) ;
2013-03-27 22:23:24 +01:00
}
2015-06-20 12:05:01 +02:00
static const NMPlatformLink *
_nm_platform_link_get ( NMPlatform * platform , int ifindex )
2014-04-22 16:02:15 +02:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj ;
obj = cache_lookup_link ( platform , ifindex ) ;
2015-06-20 12:05:01 +02:00
return obj ? & obj - > link : NULL ;
}
/* Look up a link in the platform cache by interface name.
 *
 * Returns the cached #NMPlatformLink for @ifname, or %NULL if @ifname
 * is %NULL/empty or no visible link with that name is in the cache. */
static const NMPlatformLink *
_nm_platform_link_get_by_ifname (NMPlatform *platform,
                                 const char *ifname)
{
	const NMPObject *obj;

	if (!ifname || !*ifname)
		return NULL;

	obj = nmp_cache_lookup_link_full (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache,
	                                  0, ifname, TRUE, NM_LINK_TYPE_NONE, NULL, NULL);
	return obj ? &obj->link : NULL;
}
2014-04-22 16:02:15 +02:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
/* Search context for _nm_platform_link_get_by_address_match_link():
 * the hardware address (and its byte length) to match against
 * cached links. */
struct _nm_platform_link_get_by_address_data {
	gconstpointer address;
	guint8 length;
};
static gboolean
_nm_platform_link_get_by_address_match_link ( const NMPObject * obj , struct _nm_platform_link_get_by_address_data * d )
{
return obj - > link . addr . len = = d - > length & & ! memcmp ( obj - > link . addr . data , d - > address , d - > length ) ;
2014-04-22 16:02:15 +02:00
}
2015-06-20 12:05:01 +02:00
static const NMPlatformLink *
2014-09-18 12:16:11 -05:00
_nm_platform_link_get_by_address ( NMPlatform * platform ,
gconstpointer address ,
2015-06-20 12:05:01 +02:00
size_t length )
2014-09-18 12:16:11 -05:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj ;
struct _nm_platform_link_get_by_address_data d = {
. address = address ,
. length = length ,
} ;
2014-09-18 12:16:11 -05:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( length < = 0 | | length > NM_UTILS_HWADDR_LEN_MAX )
2015-06-20 12:05:01 +02:00
return NULL ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( ! address )
2015-06-20 12:05:01 +02:00
return NULL ;
2014-09-18 12:16:11 -05:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
obj = nmp_cache_lookup_link_full ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache ,
0 , NULL , TRUE , NM_LINK_TYPE_NONE ,
( NMPObjectMatchFn ) _nm_platform_link_get_by_address_match_link , & d ) ;
2015-06-20 12:05:01 +02:00
return obj ? & obj - > link : NULL ;
2014-09-18 12:16:11 -05:00
}
2015-10-12 13:44:44 +02:00
/*****************************************************************************/
2015-10-29 11:27:55 +01:00
static const NMPObject *
2015-10-12 13:44:44 +02:00
link_get_lnk ( NMPlatform * platform , int ifindex , NMLinkType link_type , const NMPlatformLink * * out_link )
{
const NMPObject * obj = cache_lookup_link ( platform , ifindex ) ;
if ( ! obj )
return NULL ;
NM_SET_OUT ( out_link , & obj - > link ) ;
2015-10-29 11:27:55 +01:00
if ( ! obj - > _link . netlink . lnk )
return NULL ;
if ( link_type ! = NM_LINK_TYPE_NONE
& & ( link_type ! = obj - > link . type
| | link_type ! = NMP_OBJECT_GET_CLASS ( obj - > _link . netlink . lnk ) - > lnk_link_type ) )
2015-10-12 13:44:44 +02:00
return NULL ;
2015-10-29 11:27:55 +01:00
return obj - > _link . netlink . lnk ;
2015-10-12 13:44:44 +02:00
}
/*****************************************************************************/
2015-04-06 18:29:36 +02:00
static gboolean
2015-12-14 17:16:13 +01:00
do_add_link_with_lookup ( NMPlatform * platform ,
NMLinkType link_type ,
const char * name ,
struct nl_msg * nlmsg ,
const NMPlatformLink * * out_link )
2015-04-06 18:29:36 +02:00
{
2015-10-20 09:27:16 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-12-14 17:16:13 +01:00
const NMPObject * obj = NULL ;
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN ;
2015-04-06 18:29:36 +02:00
int nle ;
2015-12-14 17:16:13 +01:00
char s_buf [ 256 ] ;
2015-04-06 18:29:36 +02:00
2015-12-15 10:40:41 +01:00
event_handler_read_netlink ( platform , FALSE ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-15 13:32:14 +01:00
if ( nmp_cache_lookup_link_full ( priv - > cache , 0 , name , FALSE , NM_LINK_TYPE_NONE , NULL , NULL ) ) {
/* hm, a link with such a name already exists. Try reloading first. */
do_request_link ( platform , 0 , name ) ;
obj = nmp_cache_lookup_link_full ( priv - > cache , 0 , name , FALSE , NM_LINK_TYPE_NONE , NULL , NULL ) ;
if ( obj ) {
_LOGE ( " do-add-link[%s/%s]: link already exists: %s " ,
name ,
nm_link_type_to_string ( link_type ) ,
nmp_object_to_string ( obj , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
return FALSE ;
}
}
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
nle = _nl_send_auto_with_seq ( platform , nlmsg , & seq_result , NULL ) ;
2015-04-06 18:29:36 +02:00
if ( nle < 0 ) {
2015-12-14 17:16:13 +01:00
_LOGE ( " do-add-link[%s/%s]: failed sending netlink request \" %s \" (%d) " ,
2015-10-20 09:27:16 +02:00
name ,
nm_link_type_to_string ( link_type ) ,
nl_geterror ( nle ) , - nle ) ;
return FALSE ;
}
2015-04-06 18:29:36 +02:00
2015-12-14 17:16:13 +01:00
delayed_action_handle_all ( platform , FALSE ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-14 17:16:13 +01:00
nm_assert ( seq_result ) ;
2015-04-06 18:29:36 +02:00
2015-12-14 17:16:13 +01:00
_NMLOG ( seq_result = = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK
? LOGL_DEBUG
: LOGL_ERR ,
" do-add-link[%s/%s]: %s " ,
name ,
nm_link_type_to_string ( link_type ) ,
wait_for_nl_response_to_string ( seq_result , s_buf , sizeof ( s_buf ) ) ) ;
2015-04-06 18:29:36 +02:00
2015-12-14 17:16:13 +01:00
if ( seq_result = = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK )
obj = nmp_cache_lookup_link_full ( priv - > cache , 0 , name , FALSE , link_type , NULL , NULL ) ;
2015-04-06 18:29:36 +02:00
2015-12-14 17:16:13 +01:00
if ( ! obj ) {
/* either kernel signaled failure, or it signaled success and the link object
* is not ( yet ) in the cache . Try to reload it . . . */
do_request_link ( platform , 0 , name ) ;
obj = nmp_cache_lookup_link_full ( priv - > cache , 0 , name , FALSE , link_type , NULL , NULL ) ;
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-09 15:13:57 +01:00
if ( out_link )
* out_link = obj ? & obj - > link : NULL ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
return ! ! obj ;
2015-04-06 18:29:36 +02:00
}
static gboolean
2015-10-20 09:27:16 +02:00
do_add_addrroute ( NMPlatform * platform , const NMPObject * obj_id , struct nl_msg * nlmsg )
2015-04-06 18:29:36 +02:00
{
2015-12-15 13:37:32 +01:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-12-14 17:16:13 +01:00
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN ;
2015-04-06 18:29:36 +02:00
int nle ;
2015-12-14 17:16:13 +01:00
char s_buf [ 256 ] ;
2015-12-15 13:37:32 +01:00
const NMPObject * obj ;
2015-04-06 18:29:36 +02:00
nm_assert ( NM_IN_SET ( NMP_OBJECT_GET_TYPE ( obj_id ) ,
2015-06-19 16:24:18 +02:00
NMP_OBJECT_TYPE_IP4_ADDRESS , NMP_OBJECT_TYPE_IP6_ADDRESS ,
NMP_OBJECT_TYPE_IP4_ROUTE , NMP_OBJECT_TYPE_IP6_ROUTE ) ) ;
2015-04-06 18:29:36 +02:00
2015-12-15 10:40:41 +01:00
event_handler_read_netlink ( platform , FALSE ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
nle = _nl_send_auto_with_seq ( platform , nlmsg , & seq_result , NULL ) ;
2015-04-06 18:29:36 +02:00
if ( nle < 0 ) {
2015-10-20 09:27:16 +02:00
_LOGE ( " do-add-%s[%s]: failure sending netlink request \" %s \" (%d) " ,
2015-04-06 18:29:36 +02:00
NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name ,
2015-10-20 09:27:16 +02:00
nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ,
nl_geterror ( nle ) , - nle ) ;
return FALSE ;
}
2015-12-14 17:16:13 +01:00
delayed_action_handle_all ( platform , FALSE ) ;
2015-04-06 18:29:36 +02:00
2015-12-14 17:16:13 +01:00
nm_assert ( seq_result ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-14 17:16:13 +01:00
_NMLOG ( seq_result = = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK
? LOGL_DEBUG
: LOGL_ERR ,
" do-add-%s[%s]: %s " ,
NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name ,
nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ,
wait_for_nl_response_to_string ( seq_result , s_buf , sizeof ( s_buf ) ) ) ;
/* In rare cases, the object is not yet ready as we received the ACK from
* kernel . Need to refetch .
*
* We want to safe the expensive refetch , thus we look first into the cache
* whether the object exists .
*
* FIXME : if the object already existed previously , we might not notice a
2015-12-15 13:37:32 +01:00
* missing update . It ' s not clear how to fix that reliably without refechting
* all the time . */
obj = nmp_cache_lookup_obj ( priv - > cache , obj_id ) ;
if ( ! obj ) {
2015-12-14 14:47:41 +01:00
do_request_one_type ( platform , NMP_OBJECT_GET_TYPE ( obj_id ) ) ;
2015-12-15 13:37:32 +01:00
obj = nmp_cache_lookup_obj ( priv - > cache , obj_id ) ;
}
2015-04-06 18:29:36 +02:00
2015-12-15 13:37:32 +01:00
/* Adding is only successful, if kernel reported success *and* we have the
* expected object in cache afterwards . */
return obj & & seq_result = = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK ;
2015-04-06 18:29:36 +02:00
}
static gboolean
2015-10-20 09:27:16 +02:00
do_delete_object ( NMPlatform * platform , const NMPObject * obj_id , struct nl_msg * nlmsg )
2015-04-06 18:29:36 +02:00
{
2015-12-15 13:25:53 +01:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-12-14 18:42:29 +01:00
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN ;
2015-04-06 18:29:36 +02:00
int nle ;
2015-12-14 18:42:29 +01:00
char s_buf [ 256 ] ;
gboolean success = TRUE ;
const char * log_detail = " " ;
2015-04-06 18:29:36 +02:00
2015-12-15 10:40:41 +01:00
event_handler_read_netlink ( platform , FALSE ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
nle = _nl_send_auto_with_seq ( platform , nlmsg , & seq_result , NULL ) ;
2015-10-20 09:27:16 +02:00
if ( nle < 0 ) {
_LOGE ( " do-delete-%s[%s]: failure sending netlink request \" %s \" (%d) " ,
NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name ,
nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ,
nl_geterror ( nle ) , - nle ) ;
2015-12-15 13:25:53 +01:00
goto out ;
2015-10-20 09:27:16 +02:00
}
2015-04-06 18:29:36 +02:00
2015-12-14 18:42:29 +01:00
delayed_action_handle_all ( platform , FALSE ) ;
2015-10-20 09:27:16 +02:00
2015-12-14 18:42:29 +01:00
nm_assert ( seq_result ) ;
2015-04-06 18:29:36 +02:00
2015-12-14 18:42:29 +01:00
if ( seq_result = = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK ) {
/* ok */
} else if ( NM_IN_SET ( - ( ( int ) seq_result ) , ESRCH , ENOENT ) )
log_detail = " , meaning the object was already removed " ;
else if ( NM_IN_SET ( - ( ( int ) seq_result ) , ENXIO )
& & NM_IN_SET ( NMP_OBJECT_GET_TYPE ( obj_id ) , NMP_OBJECT_TYPE_IP6_ADDRESS ) ) {
/* On RHEL7 kernel, deleting a non existing address fails with ENXIO */
log_detail = " , meaning the address was already removed " ;
} else if ( NM_IN_SET ( - ( ( int ) seq_result ) , EADDRNOTAVAIL )
& & NM_IN_SET ( NMP_OBJECT_GET_TYPE ( obj_id ) , NMP_OBJECT_TYPE_IP4_ADDRESS , NMP_OBJECT_TYPE_IP6_ADDRESS ) )
log_detail = " , meaning the address was already removed " ;
else
success = FALSE ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-14 18:42:29 +01:00
_NMLOG ( success ? LOGL_DEBUG : LOGL_ERR ,
" do-delete-%s[%s]: %s%s " ,
NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name ,
nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ,
wait_for_nl_response_to_string ( seq_result , s_buf , sizeof ( s_buf ) ) ,
log_detail ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-15 13:25:53 +01:00
out :
if ( ! nmp_cache_lookup_obj ( priv - > cache , obj_id ) )
return TRUE ;
2015-04-06 18:29:36 +02:00
2015-12-15 13:25:53 +01:00
/* such an object still exists in the cache. To be sure, refetch it (and
* hope it ' s gone ) */
do_request_one_type ( platform , NMP_OBJECT_GET_TYPE ( obj_id ) ) ;
return ! ! nmp_cache_lookup_obj ( priv - > cache , obj_id ) ;
2015-04-06 18:29:36 +02:00
}
2016-08-28 13:52:32 +02:00
static WaitForNlResponseResult
do_change_link_request (NMPlatform *platform,
                        int ifindex,
                        struct nl_msg *nlmsg)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;
	WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
	int err;

	/* Enter the platform's network namespace; bail out if that fails. */
	if (!nm_platform_netns_push (platform, &netns))
		return WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;

retry:
	err = _nl_send_auto_with_seq (platform, nlmsg, &seq_result, NULL);
	if (err < 0) {
		_LOGE ("do-change-link[%d]: failure sending netlink request \"%s\" (%d)",
		       ifindex,
		       nl_geterror (err), -err);
		return WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
	}

	/* always refetch the link after changing it. There seems to be issues
	 * and we sometimes lack events. Nuke it from the orbit... */
	delayed_action_schedule (platform, DELAYED_ACTION_TYPE_REFRESH_LINK, GINT_TO_POINTER (ifindex));
	delayed_action_handle_all (platform, FALSE);

	nm_assert (seq_result);

	/* Some kernels reject RTM_NEWLINK for modifications with EOPNOTSUPP;
	 * fall back to RTM_SETLINK and resend the same message once. */
	if (   NM_IN_SET (-((int) seq_result), EOPNOTSUPP)
	    && nlmsg_hdr (nlmsg)->nlmsg_type == RTM_NEWLINK) {
		nlmsg_hdr (nlmsg)->nlmsg_type = RTM_SETLINK;
		goto retry;
	}

	return seq_result;
}
static NMPlatformError
do_change_link_result (NMPlatform *platform,
                       int ifindex,
                       WaitForNlResponseResult seq_result)
{
	char s_buf[256];
	NMPlatformError ret = NM_PLATFORM_ERROR_SUCCESS;
	NMLogLevel level = LOGL_DEBUG;
	const char *outcome = "failure";
	const char *detail = "";

	/* Map the raw netlink sequence result onto a platform error code and
	 * pick an appropriate log level/message for it. */
	if (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK)
		outcome = "success";
	else if (NM_IN_SET (-((int) seq_result), EEXIST, EADDRINUSE)) {
		/* */
	} else if (NM_IN_SET (-((int) seq_result), ESRCH, ENOENT)) {
		/* ESRCH/ENOENT from a link change indicates missing firmware. */
		detail = ", firmware not found";
		ret = NM_PLATFORM_ERROR_NO_FIRMWARE;
	} else if (NM_IN_SET (-((int) seq_result), ENODEV)) {
		level = LOGL_DEBUG;
		ret = NM_PLATFORM_ERROR_NOT_FOUND;
	} else {
		level = LOGL_ERR;
		ret = NM_PLATFORM_ERROR_UNSPECIFIED;
	}

	_NMLOG (level,
	        "do-change-link[%d]: %s changing link: %s%s",
	        ifindex,
	        outcome,
	        wait_for_nl_response_to_string (seq_result, s_buf, sizeof (s_buf)),
	        detail);

	return ret;
}
2016-08-28 13:52:32 +02:00
static NMPlatformError
do_change_link (NMPlatform *platform,
                int ifindex,
                struct nl_msg *nlmsg)
{
	/* Send the change request, then translate its netlink result into a
	 * platform error code. */
	return do_change_link_result (platform, ifindex,
	                              do_change_link_request (platform, ifindex, nlmsg));
}
2014-09-18 12:53:19 -05:00
static gboolean
link_add ( NMPlatform * platform ,
const char * name ,
NMLinkType type ,
const void * address ,
size_t address_len ,
2015-12-09 15:13:57 +01:00
const NMPlatformLink * * out_link )
2013-03-27 22:23:24 +01:00
{
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
2013-05-09 10:51:27 -05:00
2013-04-26 21:20:57 +02:00
if ( type = = NM_LINK_TYPE_BOND ) {
/* When the kernel loads the bond module, either via explicit modprobe
* or automatically in response to creating a bond master , it will also
* create a ' bond0 ' interface . Since the bond we ' re about to create may
* or may not be named ' bond0 ' prevent potential confusion about a bond
* that the user didn ' t want by telling the bonding module not to create
* bond0 automatically .
*/
if ( ! g_file_test ( " /sys/class/net/bonding_masters " , G_FILE_TEST_EXISTS ) )
2016-03-04 09:02:45 +01:00
( void ) nm_utils_modprobe ( NULL , TRUE , " bonding " , " max_bonds=0 " , NULL ) ;
2013-04-26 21:20:57 +02:00
}
2015-08-30 16:01:55 +02:00
_LOGD ( " link: add link '%s' of type '%s' (%d) " ,
2015-04-17 10:04:21 +02:00
name , nm_link_type_to_string ( type ) , ( int ) type ) ;
2014-03-05 10:56:16 +01:00
2015-10-20 09:27:16 +02:00
nlmsg = _nl_msg_new_link ( RTM_NEWLINK ,
2016-04-08 15:05:35 +02:00
NLM_F_CREATE | NLM_F_EXCL ,
2015-10-20 09:27:16 +02:00
0 ,
name ,
2015-11-02 14:27:22 +01:00
0 ,
2015-10-20 09:27:16 +02:00
0 ) ;
if ( ! nlmsg )
return FALSE ;
2014-05-13 18:13:52 +02:00
2015-10-20 09:27:16 +02:00
if ( address & & address_len )
NLA_PUT ( nlmsg , IFLA_ADDRESS , address_len , address ) ;
2014-05-13 18:13:52 +02:00
2015-10-20 09:27:16 +02:00
if ( ! _nl_msg_new_link_set_linkinfo ( nlmsg , type ) )
return FALSE ;
2014-09-18 12:53:19 -05:00
2015-10-20 09:27:16 +02:00
return do_add_link_with_lookup ( platform , type , name , nlmsg , out_link ) ;
nla_put_failure :
g_return_val_if_reached ( FALSE ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
link_delete (NMPlatform *platform, int ifindex)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	NMPObject obj_id;
	const NMPObject *cached;

	/* Refuse to delete links we don't know about (or that are not visible
	 * in netlink, e.g. udev-only entries). */
	cached = nmp_cache_lookup_link (priv->cache, ifindex);
	if (!cached || !cached->_link.netlink.is_in_netlink)
		return FALSE;

	nlmsg = _nl_msg_new_link (RTM_DELLINK,
	                          0,
	                          ifindex,
	                          NULL,
	                          0,
	                          0);

	nmp_object_stackinit_id_link (&obj_id, ifindex);
	return do_delete_object (platform, &obj_id, nlmsg);
}
2013-04-26 11:43:08 -04:00
static const char *
link_get_type_name (NMPlatform *platform, int ifindex)
{
	const NMPObject *obj = cache_lookup_link (platform, ifindex);

	if (!obj)
		return NULL;

	if (obj->link.type != NM_LINK_TYPE_UNKNOWN) {
		/* We could detect the @link_type. In this case the function returns
		 * our internel module names, which differs from rtnl_link_get_type():
		 *   - NM_LINK_TYPE_INFINIBAND (gives "infiniband", instead of "ipoib")
		 *   - NM_LINK_TYPE_TAP (gives "tap", instead of "tun").
		 * Note that this functions is only used by NMDeviceGeneric to
		 * set type_description. */
		return nm_link_type_to_string (obj->link.type);
	}

	/* Link type not detected. Fallback to rtnl_link_get_type()/IFLA_INFO_KIND. */
	return obj->link.kind ?: "unknown";
}
2015-01-22 16:41:15 +01:00
static gboolean
2015-06-24 14:21:27 +02:00
link_get_unmanaged ( NMPlatform * platform , int ifindex , gboolean * unmanaged )
2015-01-22 16:41:15 +01:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * link ;
GUdevDevice * udev_device = NULL ;
link = nmp_cache_lookup_link ( priv - > cache , ifindex ) ;
if ( link )
udev_device = link - > _link . udev . device ;
2015-01-22 16:41:15 +01:00
2015-03-27 13:58:52 -05:00
if ( udev_device & & g_udev_device_get_property ( udev_device , " NM_UNMANAGED " ) ) {
2015-06-24 14:21:27 +02:00
* unmanaged = g_udev_device_get_property_as_boolean ( udev_device , " NM_UNMANAGED " ) ;
2015-01-22 16:41:15 +01:00
return TRUE ;
}
return FALSE ;
}
2014-02-11 13:58:00 +01:00
/* Re-request the link state for @ifindex from kernel via the event socket.
 * Returns TRUE if, after the refresh, the link is (still) present in the
 * platform cache. */
static gboolean
link_refresh (NMPlatform *platform, int ifindex)
{
	do_request_link (platform, ifindex, NULL);
	return !!cache_lookup_link (platform, ifindex);
}
2016-03-08 13:02:58 +01:00
/* Move the link @ifindex into the network namespace identified by the open
 * file descriptor @netns_fd (IFLA_NET_NS_FD). Returns TRUE on success. */
static gboolean
link_set_netns (NMPlatform *platform,
                int ifindex,
                int netns_fd)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;

	_LOGD ("link: move link %d to network namespace with fd %d", ifindex, netns_fd);

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          0,
	                          ifindex,
	                          NULL,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	/* IFLA_NET_NS_FD carries a raw 4-byte fd value. */
	NLA_PUT (nlmsg, IFLA_NET_NS_FD, 4, &netns_fd);

	return do_change_link (platform, ifindex, nlmsg) == NM_PLATFORM_ERROR_SUCCESS;
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-06-15 17:41:27 +02:00
static NMPlatformError
2015-11-02 14:27:22 +01:00
link_change_flags ( NMPlatform * platform ,
int ifindex ,
unsigned flags_mask ,
unsigned flags_set )
2013-03-27 22:23:24 +01:00
{
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
2015-11-02 14:27:22 +01:00
char s_flags [ 100 ] ;
2013-03-27 22:23:24 +01:00
2015-11-02 14:27:22 +01:00
_LOGD ( " link: change %d: flags: set 0x%x/0x%x ([%s] / [%s]) " ,
ifindex ,
flags_set ,
flags_mask ,
nm_platform_link_flags2str ( flags_set , s_flags , sizeof ( s_flags ) ) ,
nm_platform_link_flags2str ( flags_mask , NULL , 0 ) ) ;
2014-03-05 10:56:16 +01:00
2015-10-20 09:27:16 +02:00
nlmsg = _nl_msg_new_link ( RTM_NEWLINK ,
0 ,
ifindex ,
NULL ,
2015-11-02 14:27:22 +01:00
flags_mask ,
flags_set ) ;
2015-10-20 09:27:16 +02:00
if ( ! nlmsg )
return NM_PLATFORM_ERROR_UNSPECIFIED ;
return do_change_link ( platform , ifindex , nlmsg ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2015-06-15 17:41:27 +02:00
link_set_up ( NMPlatform * platform , int ifindex , gboolean * out_no_firmware )
2013-03-27 22:23:24 +01:00
{
2015-06-15 17:41:27 +02:00
NMPlatformError plerr ;
2015-11-02 14:27:22 +01:00
plerr = link_change_flags ( platform , ifindex , IFF_UP , IFF_UP ) ;
2015-06-15 17:41:27 +02:00
if ( out_no_firmware )
* out_no_firmware = plerr = = NM_PLATFORM_ERROR_NO_FIRMWARE ;
return plerr = = NM_PLATFORM_ERROR_SUCCESS ;
2013-03-27 22:23:24 +01:00
}
/* Take the link @ifindex down (clear IFF_UP). Returns TRUE on success. */
static gboolean
link_set_down (NMPlatform *platform, int ifindex)
{
	return link_change_flags (platform, ifindex, IFF_UP, 0) == NM_PLATFORM_ERROR_SUCCESS;
}
/* Enable ARP on the link @ifindex (clear IFF_NOARP). Returns TRUE on success. */
static gboolean
link_set_arp (NMPlatform *platform, int ifindex)
{
	return link_change_flags (platform, ifindex, IFF_NOARP, 0) == NM_PLATFORM_ERROR_SUCCESS;
}
/* Disable ARP on the link @ifindex (set IFF_NOARP). Returns TRUE on success. */
static gboolean
link_set_noarp (NMPlatform *platform, int ifindex)
{
	return link_change_flags (platform, ifindex, IFF_NOARP, IFF_NOARP) == NM_PLATFORM_ERROR_SUCCESS;
}
2015-06-15 14:41:35 +02:00
/* Return the udev sysfs path of the link @ifindex as its unique device
 * identifier, or NULL if the link is not visible in netlink or has no
 * associated udev device. The returned string is owned by the udev device. */
static const char *
link_get_udi (NMPlatform *platform, int ifindex)
{
	const NMPObject *obj = cache_lookup_link (platform, ifindex);

	if (   !obj
	    || !obj->_link.netlink.is_in_netlink
	    || !obj->_link.udev.device)
		return NULL;
	return g_udev_device_get_sysfs_path (obj->_link.udev.device);
}
2015-06-15 15:19:28 +02:00
/* Return the GUdevDevice associated with the link @ifindex, or NULL.
 * The caller does not receive a reference. */
static GObject *
link_get_udev_device (NMPlatform *platform, int ifindex)
{
	const NMPObject *obj_cache;

	/* we don't use cache_lookup_link() because this would return NULL
	 * if the link is not visible in libnl. For link_get_udev_device()
	 * we want to return whatever we have, even if the link itself
	 * appears invisible via other platform functions. */
	obj_cache = nmp_cache_lookup_link (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, ifindex);
	return obj_cache ? (GObject *) obj_cache->_link.udev.device : NULL;
}
device: tune down warning about failure to set userspace IPv6LL on non-existing device
When a device gets removed externally, we still try to clear userspace IPv6LL address handling.
That fails, due to non-existing device. Such a failure should not be logged as warning.
<debug> [1467723214.2078] device[0x558c59335ca0] (enp0s25): disposing
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'dhcp6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'autoconf6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): will disable userland IPv6LL
<debug> [1467723214.2079] platform-linux: link: change 20: user-ipv6ll: set IPv6 address generation mode to eui64
<trace> [1467723214.2080] platform-linux: delayed-action: schedule wait-for-nl-response (seq 92, timeout in 0.199998611)
<trace> [1467723214.2080] platform-linux: delayed-action: schedule refresh-link (ifindex 20)
<trace> [1467723214.2080] platform-linux: delayed-action: handle refresh-link (ifindex 20)
<debug> [1467723214.2080] platform-linux: do-request-link: 20
<trace> [1467723214.2080] platform-linux: netlink: recvmsg: new message type 2, seq 92
<debug> [1467723214.2080] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 92
<trace> [1467723214.2081] platform-linux: delayed-action: complete wait-for-nl-response (seq 92, timeout in 0.199895684, failure 19 (No such device))
<trace> [1467723214.2081] platform-linux: delayed-action: schedule wait-for-nl-response (seq 93, timeout in 0.199999306)
<trace> [1467723214.2081] platform-linux: delayed-action: handle wait-for-nl-response (any)
<trace> [1467723214.2081] platform-linux: netlink: recvmsg: new message type 2, seq 93
<debug> [1467723214.2081] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 93
<trace> [1467723214.2082] platform-linux: delayed-action: complete wait-for-nl-response (seq 93, timeout in 0.199921142, failure 19 (No such device))
<debug> [1467723214.2082] platform-linux: do-change-link[20]: failure changing link: failure 19 (No such device)
<warn> [1467723214.2082] device (enp0s25): failed to disable userspace IPv6LL address handling
https://bugzilla.redhat.com/show_bug.cgi?id=1323571
2016-07-05 15:04:37 +02:00
static NMPlatformError
2014-07-24 15:57:08 -05:00
link_set_user_ipv6ll_enabled ( NMPlatform * platform , int ifindex , gboolean enabled )
{
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
guint8 mode = enabled ? NM_IN6_ADDR_GEN_MODE_NONE : NM_IN6_ADDR_GEN_MODE_EUI64 ;
2014-07-24 15:57:08 -05:00
2015-10-20 09:27:16 +02:00
if ( ! _support_user_ipv6ll_get ( ) ) {
_LOGD ( " link: change %d: user-ipv6ll: not supported " , ifindex ) ;
device: tune down warning about failure to set userspace IPv6LL on non-existing device
When a device gets removed externally, we still try to clear userspace IPv6LL address handling.
That fails, due to non-existing device. Such a failure should not be logged as warning.
<debug> [1467723214.2078] device[0x558c59335ca0] (enp0s25): disposing
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'dhcp6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'autoconf6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): will disable userland IPv6LL
<debug> [1467723214.2079] platform-linux: link: change 20: user-ipv6ll: set IPv6 address generation mode to eui64
<trace> [1467723214.2080] platform-linux: delayed-action: schedule wait-for-nl-response (seq 92, timeout in 0.199998611)
<trace> [1467723214.2080] platform-linux: delayed-action: schedule refresh-link (ifindex 20)
<trace> [1467723214.2080] platform-linux: delayed-action: handle refresh-link (ifindex 20)
<debug> [1467723214.2080] platform-linux: do-request-link: 20
<trace> [1467723214.2080] platform-linux: netlink: recvmsg: new message type 2, seq 92
<debug> [1467723214.2080] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 92
<trace> [1467723214.2081] platform-linux: delayed-action: complete wait-for-nl-response (seq 92, timeout in 0.199895684, failure 19 (No such device))
<trace> [1467723214.2081] platform-linux: delayed-action: schedule wait-for-nl-response (seq 93, timeout in 0.199999306)
<trace> [1467723214.2081] platform-linux: delayed-action: handle wait-for-nl-response (any)
<trace> [1467723214.2081] platform-linux: netlink: recvmsg: new message type 2, seq 93
<debug> [1467723214.2081] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 93
<trace> [1467723214.2082] platform-linux: delayed-action: complete wait-for-nl-response (seq 93, timeout in 0.199921142, failure 19 (No such device))
<debug> [1467723214.2082] platform-linux: do-change-link[20]: failure changing link: failure 19 (No such device)
<warn> [1467723214.2082] device (enp0s25): failed to disable userspace IPv6LL address handling
https://bugzilla.redhat.com/show_bug.cgi?id=1323571
2016-07-05 15:04:37 +02:00
return NM_PLATFORM_ERROR_OPNOTSUPP ;
2014-07-24 15:57:08 -05:00
}
2015-10-20 09:27:16 +02:00
_LOGD ( " link: change %d: user-ipv6ll: set IPv6 address generation mode to %s " ,
ifindex ,
nm_platform_link_inet6_addrgenmode2str ( mode , NULL , 0 ) ) ;
nlmsg = _nl_msg_new_link ( RTM_NEWLINK ,
0 ,
ifindex ,
NULL ,
2015-11-02 14:27:22 +01:00
0 ,
0 ) ;
2015-10-20 09:27:16 +02:00
if ( ! nlmsg
2016-04-30 16:48:32 +02:00
| | ! _nl_msg_new_link_set_afspec ( nlmsg , mode , NULL ) )
device: tune down warning about failure to set userspace IPv6LL on non-existing device
When a device gets removed externally, we still try to clear userspace IPv6LL address handling.
That fails, due to non-existing device. Such a failure should not be logged as warning.
<debug> [1467723214.2078] device[0x558c59335ca0] (enp0s25): disposing
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'dhcp6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'autoconf6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): will disable userland IPv6LL
<debug> [1467723214.2079] platform-linux: link: change 20: user-ipv6ll: set IPv6 address generation mode to eui64
<trace> [1467723214.2080] platform-linux: delayed-action: schedule wait-for-nl-response (seq 92, timeout in 0.199998611)
<trace> [1467723214.2080] platform-linux: delayed-action: schedule refresh-link (ifindex 20)
<trace> [1467723214.2080] platform-linux: delayed-action: handle refresh-link (ifindex 20)
<debug> [1467723214.2080] platform-linux: do-request-link: 20
<trace> [1467723214.2080] platform-linux: netlink: recvmsg: new message type 2, seq 92
<debug> [1467723214.2080] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 92
<trace> [1467723214.2081] platform-linux: delayed-action: complete wait-for-nl-response (seq 92, timeout in 0.199895684, failure 19 (No such device))
<trace> [1467723214.2081] platform-linux: delayed-action: schedule wait-for-nl-response (seq 93, timeout in 0.199999306)
<trace> [1467723214.2081] platform-linux: delayed-action: handle wait-for-nl-response (any)
<trace> [1467723214.2081] platform-linux: netlink: recvmsg: new message type 2, seq 93
<debug> [1467723214.2081] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 93
<trace> [1467723214.2082] platform-linux: delayed-action: complete wait-for-nl-response (seq 93, timeout in 0.199921142, failure 19 (No such device))
<debug> [1467723214.2082] platform-linux: do-change-link[20]: failure changing link: failure 19 (No such device)
<warn> [1467723214.2082] device (enp0s25): failed to disable userspace IPv6LL address handling
https://bugzilla.redhat.com/show_bug.cgi?id=1323571
2016-07-05 15:04:37 +02:00
g_return_val_if_reached ( NM_PLATFORM_ERROR_BUG ) ;
2016-04-30 16:48:32 +02:00
device: tune down warning about failure to set userspace IPv6LL on non-existing device
When a device gets removed externally, we still try to clear userspace IPv6LL address handling.
That fails, due to non-existing device. Such a failure should not be logged as warning.
<debug> [1467723214.2078] device[0x558c59335ca0] (enp0s25): disposing
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'dhcp6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): remove_pending_action (0): 'autoconf6' not pending (expected)
<debug> [1467723214.2079] device[0x558c59335ca0] (enp0s25): will disable userland IPv6LL
<debug> [1467723214.2079] platform-linux: link: change 20: user-ipv6ll: set IPv6 address generation mode to eui64
<trace> [1467723214.2080] platform-linux: delayed-action: schedule wait-for-nl-response (seq 92, timeout in 0.199998611)
<trace> [1467723214.2080] platform-linux: delayed-action: schedule refresh-link (ifindex 20)
<trace> [1467723214.2080] platform-linux: delayed-action: handle refresh-link (ifindex 20)
<debug> [1467723214.2080] platform-linux: do-request-link: 20
<trace> [1467723214.2080] platform-linux: netlink: recvmsg: new message type 2, seq 92
<debug> [1467723214.2080] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 92
<trace> [1467723214.2081] platform-linux: delayed-action: complete wait-for-nl-response (seq 92, timeout in 0.199895684, failure 19 (No such device))
<trace> [1467723214.2081] platform-linux: delayed-action: schedule wait-for-nl-response (seq 93, timeout in 0.199999306)
<trace> [1467723214.2081] platform-linux: delayed-action: handle wait-for-nl-response (any)
<trace> [1467723214.2081] platform-linux: netlink: recvmsg: new message type 2, seq 93
<debug> [1467723214.2081] platform-linux: netlink: recvmsg: error message from kernel: No such device (19) for request 93
<trace> [1467723214.2082] platform-linux: delayed-action: complete wait-for-nl-response (seq 93, timeout in 0.199921142, failure 19 (No such device))
<debug> [1467723214.2082] platform-linux: do-change-link[20]: failure changing link: failure 19 (No such device)
<warn> [1467723214.2082] device (enp0s25): failed to disable userspace IPv6LL address handling
https://bugzilla.redhat.com/show_bug.cgi?id=1323571
2016-07-05 15:04:37 +02:00
return do_change_link ( platform , ifindex , nlmsg ) ;
2016-04-30 16:48:32 +02:00
}
/* Set the IPv6 address generation token (interface identifier) of link
 * @ifindex. Returns TRUE on success. */
static gboolean
link_set_token (NMPlatform *platform, int ifindex, NMUtilsIPv6IfaceId iid)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;

	_LOGD ("link: change %d: token: set IPv6 address generation token to %s",
	       ifindex, nm_utils_inet6_interface_identifier_to_token (iid, NULL));

	/* -1 for the addr-gen mode leaves it unchanged; only the token is set. */
	nlmsg = _nl_msg_new_link (RTM_NEWLINK, 0, ifindex, NULL, 0, 0);
	if (!nlmsg || !_nl_msg_new_link_set_afspec (nlmsg, -1, &iid))
		g_return_val_if_reached (FALSE);

	return do_change_link (platform, ifindex, nlmsg) == NM_PLATFORM_ERROR_SUCCESS;
}
2013-05-20 15:38:54 -03:00
/* Probe (inside the platform's network namespace) whether the link @ifindex
 * supports carrier detection via ethtool or MII ioctls. */
static gboolean
link_supports_carrier_detect (NMPlatform *platform, int ifindex)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;

	if (!nm_platform_netns_push (platform, &netns))
		return FALSE;

	/* We use netlink for the actual carrier detection, but netlink can't tell
	 * us whether the device actually supports carrier detection in the first
	 * place. We assume any device that does implements one of these two APIs.
	 */
	return nmp_utils_ethtool_supports_carrier_detect (ifindex) || nmp_utils_mii_supports_carrier_detect (ifindex);
}
/* Probe whether the link @ifindex supports VLANs: it must be an Ethernet
 * link (ARPHRD_ETHER) and ethtool must report VLAN capability. */
static gboolean
link_supports_vlans (NMPlatform *platform, int ifindex)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;
	const NMPObject *obj;

	obj = cache_lookup_link (platform, ifindex);

	/* Only ARPHRD_ETHER links can possibly support VLANs. */
	if (!obj || obj->link.arptype != ARPHRD_ETHER)
		return FALSE;

	if (!nm_platform_netns_push (platform, &netns))
		return FALSE;

	return nmp_utils_ethtool_supports_vlans (ifindex);
}
2016-07-05 10:41:18 +02:00
static NMPlatformError
2013-03-27 22:53:55 +01:00
link_set_address ( NMPlatform * platform , int ifindex , gconstpointer address , size_t length )
{
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
gs_free char * mac = NULL ;
2016-08-28 14:08:42 +02:00
WaitForNlResponseResult seq_result ;
char s_buf [ 256 ] ;
2013-03-27 22:53:55 +01:00
2015-10-20 09:27:16 +02:00
if ( ! address | | ! length )
2016-07-05 10:41:18 +02:00
g_return_val_if_reached ( NM_PLATFORM_ERROR_BUG ) ;
2013-03-27 22:53:55 +01:00
2015-10-20 09:27:16 +02:00
_LOGD ( " link: change %d: address: %s (%lu bytes) " , ifindex ,
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
( mac = nm_utils_hwaddr_ntoa ( address , length ) ) ,
( unsigned long ) length ) ;
2015-10-20 09:27:16 +02:00
nlmsg = _nl_msg_new_link ( RTM_NEWLINK ,
0 ,
ifindex ,
NULL ,
2015-11-02 14:27:22 +01:00
0 ,
0 ) ;
2015-10-20 09:27:16 +02:00
if ( ! nlmsg )
2016-07-05 10:41:18 +02:00
g_return_val_if_reached ( NM_PLATFORM_ERROR_UNSPECIFIED ) ;
2015-10-20 09:27:16 +02:00
NLA_PUT ( nlmsg , IFLA_ADDRESS , length , address ) ;
2016-08-28 14:08:42 +02:00
seq_result = do_change_link_request ( platform , ifindex , nlmsg ) ;
if ( NM_IN_SET ( - ( ( int ) seq_result ) , ENFILE ) ) {
const NMPObject * obj_cache ;
/* workaround ENFILE which may be wrongly returned (bgo #770456).
* If the MAC address is as expected , assume success ? */
obj_cache = nmp_cache_lookup_link ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache , ifindex ) ;
if ( obj_cache
& & obj_cache - > link . addr . len = = length
& & memcmp ( obj_cache - > link . addr . data , address , length ) = = 0 ) {
_NMLOG ( LOGL_DEBUG ,
" do-change-link[%d]: %s changing link: %s%s " ,
ifindex ,
" success " ,
wait_for_nl_response_to_string ( seq_result , s_buf , sizeof ( s_buf ) ) ,
" (assume success changing address) " ) ;
return NM_PLATFORM_ERROR_SUCCESS ;
}
}
return do_change_link_result ( platform , ifindex , seq_result ) ;
2015-10-20 09:27:16 +02:00
nla_put_failure :
2016-07-05 10:41:18 +02:00
g_return_val_if_reached ( NM_PLATFORM_ERROR_UNSPECIFIED ) ;
2013-03-27 22:53:55 +01:00
}
2014-10-03 17:37:26 -05:00
static gboolean
link_get_permanent_address (NMPlatform *platform,
                            int ifindex,
                            guint8 *buf,
                            size_t *length)
{
	/* Fetch the permanent (burned-in) hardware address of @ifindex via
	 * ethtool. The query must run inside the network namespace of the
	 * platform instance, so push it first and pop it automatically on
	 * return. Returns FALSE if the namespace cannot be entered. */
	nm_auto_pop_netns NMPNetns *netns = NULL;

	if (nm_platform_netns_push (platform, &netns))
		return nmp_utils_ethtool_get_permanent_address (ifindex, buf, length);

	return FALSE;
}
2013-04-15 21:48:12 +02:00
static gboolean
link_set_mtu (NMPlatform *platform, int ifindex, guint32 mtu)
{
	/* Set the MTU of @ifindex by sending an RTM_NEWLINK change request
	 * carrying a single IFLA_MTU attribute. */
	nm_auto_nlmsg struct nl_msg *msg = NULL;

	_LOGD ("link: change %d: mtu: %u", ifindex, (unsigned) mtu);

	msg = _nl_msg_new_link (RTM_NEWLINK, 0, ifindex, NULL, 0, 0);
	if (!msg)
		return FALSE;

	NLA_PUT_U32 (msg, IFLA_MTU, mtu);

	return do_change_link (platform, ifindex, msg) == NM_PLATFORM_ERROR_SUCCESS;

nla_put_failure:
	/* NLA_PUT_* jumps here when the message buffer is exhausted; that
	 * cannot legitimately happen for a single u32 attribute. */
	g_return_val_if_reached (FALSE);
}
2013-10-11 14:59:26 -04:00
static char *
link_get_physical_port_id (NMPlatform *platform, int ifindex)
{
	/* Read the "phys_port_id" sysfs attribute of @ifindex.
	 * Returns a newly allocated string (caller frees), or NULL if the
	 * netdir for the interface cannot be opened. */
	char ifname_verified[IFNAMSIZ];
	nm_auto_close int dirfd = nm_platform_sysctl_open_netdir (platform, ifindex, ifname_verified);

	if (dirfd < 0)
		return NULL;

	return sysctl_get (platform,
	                   NMP_SYSCTL_PATHID_NETDIR (dirfd, ifname_verified, "phys_port_id"));
}
2015-03-24 12:35:36 -05:00
static guint
link_get_dev_id (NMPlatform *platform, int ifindex)
{
	/* Read the "dev_id" sysfs attribute of @ifindex as a hexadecimal
	 * 16-bit value. Returns 0 on any failure (missing interface,
	 * unreadable or out-of-range value). */
	char ifname_verified[IFNAMSIZ];
	nm_auto_close int dirfd = nm_platform_sysctl_open_netdir (platform, ifindex, ifname_verified);

	if (dirfd < 0)
		return 0;

	return nm_platform_sysctl_get_int_checked (platform,
	                                           NMP_SYSCTL_PATHID_NETDIR (dirfd, ifname_verified, "dev_id"),
	                                           16, 0, G_MAXUINT16, 0);
}
2013-03-27 22:53:55 +01:00
static int
vlan_add (NMPlatform *platform,
          const char *name,
          int parent,
          int vlan_id,
          guint32 vlan_flags,
          const NMPlatformLink **out_link)
{
	/* Create a VLAN interface @name with id @vlan_id on top of @parent. */
	nm_auto_nlmsg struct nl_msg *msg = NULL;

	/* Our NM_VLAN_FLAG_* values are defined to be numerically identical
	 * to the kernel's VLAN_FLAG_* values; assert that at compile time. */
	G_STATIC_ASSERT (NM_VLAN_FLAG_REORDER_HEADERS == (guint32) VLAN_FLAG_REORDER_HDR);
	G_STATIC_ASSERT (NM_VLAN_FLAG_GVRP == (guint32) VLAN_FLAG_GVRP);
	G_STATIC_ASSERT (NM_VLAN_FLAG_LOOSE_BINDING == (guint32) VLAN_FLAG_LOOSE_BINDING);
	G_STATIC_ASSERT (NM_VLAN_FLAG_MVRP == (guint32) VLAN_FLAG_MVRP);

	vlan_flags &= (guint32) NM_VLAN_FLAGS_ALL;

	_LOGD ("link: add vlan '%s', parent %d, vlan id %d, flags %X",
	       name, parent, vlan_id, (unsigned int) vlan_flags);

	msg = _nl_msg_new_link (RTM_NEWLINK,
	                        NLM_F_CREATE | NLM_F_EXCL,
	                        0,
	                        name,
	                        0,
	                        0);
	if (!msg)
		return FALSE;

	NLA_PUT_U32 (msg, IFLA_LINK, parent);

	if (!_nl_msg_new_link_set_linkinfo_vlan (msg,
	                                         vlan_id,
	                                         NM_VLAN_FLAGS_ALL,
	                                         vlan_flags,
	                                         NULL,
	                                         0,
	                                         NULL,
	                                         0))
		return FALSE;

	return do_add_link_with_lookup (platform, NM_LINK_TYPE_VLAN, name, msg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-09-01 22:11:47 +02:00
static int
link_gre_add (NMPlatform *platform,
              const char *name,
              const NMPlatformLnkGre *props,
              const NMPlatformLink **out_link)
{
	/* Create a GRE tunnel interface @name configured from @props. */
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	struct nlattr *linkinfo;
	struct nlattr *infodata;
	char buffer[INET_ADDRSTRLEN];

	_LOGD (LOG_FMT_IP_TUNNEL,
	       "gre",
	       name,
	       props->parent_ifindex,
	       nm_utils_inet4_ntop (props->local, NULL),
	       nm_utils_inet4_ntop (props->remote, buffer));

	msg = _nl_msg_new_link (RTM_NEWLINK,
	                        NLM_F_CREATE | NLM_F_EXCL,
	                        0,
	                        name,
	                        0,
	                        0);
	if (!msg)
		return FALSE;

	linkinfo = nla_nest_start (msg, IFLA_LINKINFO);
	if (!linkinfo)
		goto nla_put_failure;
	NLA_PUT_STRING (msg, IFLA_INFO_KIND, "gre");

	infodata = nla_nest_start (msg, IFLA_INFO_DATA);
	if (!infodata)
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (msg, IFLA_GRE_LINK, props->parent_ifindex);
	NLA_PUT_U32 (msg, IFLA_GRE_LOCAL, props->local);
	NLA_PUT_U32 (msg, IFLA_GRE_REMOTE, props->remote);
	NLA_PUT_U8 (msg, IFLA_GRE_TTL, props->ttl);
	NLA_PUT_U8 (msg, IFLA_GRE_TOS, props->tos);
	NLA_PUT_U8 (msg, IFLA_GRE_PMTUDISC, !!props->path_mtu_discovery);
	NLA_PUT_U32 (msg, IFLA_GRE_IKEY, htonl (props->input_key));
	NLA_PUT_U32 (msg, IFLA_GRE_OKEY, htonl (props->output_key));
	/* NOTE(review): the flags are 16-bit on-the-wire values sent here as
	 * u32 attributes (as in the original) — confirm against what the
	 * kernel's gre driver parses. */
	NLA_PUT_U32 (msg, IFLA_GRE_IFLAGS, htons (props->input_flags));
	NLA_PUT_U32 (msg, IFLA_GRE_OFLAGS, htons (props->output_flags));

	nla_nest_end (msg, infodata);
	nla_nest_end (msg, linkinfo);

	return do_add_link_with_lookup (platform, NM_LINK_TYPE_GRE, name, msg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-11-27 22:22:25 +01:00
static int
link_ip6tnl_add (NMPlatform *platform,
                 const char *name,
                 const NMPlatformLnkIp6Tnl *props,
                 const NMPlatformLink **out_link)
{
	/* Create an IPv6 tunnel (ip6tnl) interface @name from @props. */
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;
	/* FIX: this buffer receives an IPv6 address string from
	 * nm_utils_inet6_ntop(), which needs up to INET6_ADDRSTRLEN (46)
	 * bytes. The previous INET_ADDRSTRLEN (16) could overflow the
	 * stack buffer. */
	char buffer[INET6_ADDRSTRLEN];
	guint32 flowinfo;

	_LOGD (LOG_FMT_IP_TUNNEL,
	       "ip6tnl",
	       name,
	       props->parent_ifindex,
	       nm_utils_inet6_ntop (&props->local, NULL),
	       nm_utils_inet6_ntop (&props->remote, buffer));

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "ip6tnl");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (nlmsg, IFLA_IPTUN_LINK, props->parent_ifindex);

	/* Only send endpoint addresses that are not the unspecified address. */
	if (memcmp (&props->local, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (nlmsg, IFLA_IPTUN_LOCAL, sizeof (props->local), &props->local);
	if (memcmp (&props->remote, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (nlmsg, IFLA_IPTUN_REMOTE, sizeof (props->remote), &props->remote);

	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_TTL, props->ttl);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_ENCAP_LIMIT, props->encap_limit);

	/* Pack flow label and traffic class into the single flowinfo word. */
	flowinfo = props->flow_label & IP6_FLOWINFO_FLOWLABEL_MASK;
	flowinfo |=   (props->tclass << IP6_FLOWINFO_TCLASS_SHIFT)
	            & IP6_FLOWINFO_TCLASS_MASK;
	NLA_PUT_U32 (nlmsg, IFLA_IPTUN_FLOWINFO, htonl (flowinfo));
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_PROTO, props->proto);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return do_add_link_with_lookup (platform, NM_LINK_TYPE_IP6TNL, name, nlmsg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-11-27 14:01:56 +01:00
static int
link_ipip_add (NMPlatform *platform,
               const char *name,
               const NMPlatformLnkIpIp *props,
               const NMPlatformLink **out_link)
{
	/* Create an IPIP tunnel interface @name configured from @props. */
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	struct nlattr *linkinfo;
	struct nlattr *infodata;
	char buffer[INET_ADDRSTRLEN];

	_LOGD (LOG_FMT_IP_TUNNEL,
	       "ipip",
	       name,
	       props->parent_ifindex,
	       nm_utils_inet4_ntop (props->local, NULL),
	       nm_utils_inet4_ntop (props->remote, buffer));

	msg = _nl_msg_new_link (RTM_NEWLINK,
	                        NLM_F_CREATE | NLM_F_EXCL,
	                        0,
	                        name,
	                        0,
	                        0);
	if (!msg)
		return FALSE;

	linkinfo = nla_nest_start (msg, IFLA_LINKINFO);
	if (!linkinfo)
		goto nla_put_failure;
	NLA_PUT_STRING (msg, IFLA_INFO_KIND, "ipip");

	infodata = nla_nest_start (msg, IFLA_INFO_DATA);
	if (!infodata)
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (msg, IFLA_IPTUN_LINK, props->parent_ifindex);
	NLA_PUT_U32 (msg, IFLA_IPTUN_LOCAL, props->local);
	NLA_PUT_U32 (msg, IFLA_IPTUN_REMOTE, props->remote);
	NLA_PUT_U8 (msg, IFLA_IPTUN_TTL, props->ttl);
	NLA_PUT_U8 (msg, IFLA_IPTUN_TOS, props->tos);
	NLA_PUT_U8 (msg, IFLA_IPTUN_PMTUDISC, !!props->path_mtu_discovery);

	nla_nest_end (msg, infodata);
	nla_nest_end (msg, linkinfo);

	return do_add_link_with_lookup (platform, NM_LINK_TYPE_IPIP, name, msg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2016-06-30 18:20:09 +02:00
static int
link_macsec_add (NMPlatform *platform,
                 const char *name,
                 int parent,
                 const NMPlatformLnkMacsec *props,
                 const NMPlatformLink **out_link)
{
	/* Create a MACsec interface @name on top of @parent, configured
	 * from @props. */
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;

	/* FIX: @parent is a signed int; the previous "%u" specifier was a
	 * printf format/argument mismatch (flagged by -Wformat). */
	_LOGD ("adding macsec '%s' parent %d sci %llx",
	       name,
	       parent,
	       (unsigned long long) props->sci);

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	NLA_PUT_U32 (nlmsg, IFLA_LINK, parent);

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "macsec");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (props->icv_length) {
		/* NOTE(review): a fixed ICV length of 16 is sent whenever
		 * icv_length is non-zero, regardless of its actual value —
		 * confirm this is intended. */
		NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ICV_LEN, 16);
	}
	if (props->cipher_suite)
		NLA_PUT_U64 (nlmsg, IFLA_MACSEC_CIPHER_SUITE, props->cipher_suite);
	if (props->replay_protect)
		NLA_PUT_U32 (nlmsg, IFLA_MACSEC_WINDOW, props->window);

	/* The SCI is transmitted in network (big-endian) byte order. */
	NLA_PUT_U64 (nlmsg, IFLA_MACSEC_SCI, htobe64 (props->sci));
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ENCODING_SA, props->encoding_sa);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ENCRYPT, props->encrypt);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_PROTECT, props->protect);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_INC_SCI, props->include_sci);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ES, props->es);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_SCB, props->scb);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_REPLAY_PROTECT, props->replay_protect);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_VALIDATION, props->validation);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return do_add_link_with_lookup (platform,
	                                NM_LINK_TYPE_MACSEC,
	                                name, nlmsg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-12-03 17:09:50 +01:00
static int
link_macvlan_add (NMPlatform *platform,
                  const char *name,
                  int parent,
                  const NMPlatformLnkMacvlan *props,
                  const NMPlatformLink **out_link)
{
	/* Create a macvlan or macvtap interface @name (selected by
	 * props->tap) on top of @parent. */
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;

	/* FIX: @parent is a signed int; the previous "%u" specifier was a
	 * printf format/argument mismatch (flagged by -Wformat). */
	_LOGD ("adding %s '%s' parent %d mode %u",
	       props->tap ? "macvtap" : "macvlan",
	       name,
	       parent,
	       props->mode);

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	NLA_PUT_U32 (nlmsg, IFLA_LINK, parent);

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, props->tap ? "macvtap" : "macvlan");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	NLA_PUT_U32 (nlmsg, IFLA_MACVLAN_MODE, props->mode);
	NLA_PUT_U16 (nlmsg, IFLA_MACVLAN_FLAGS, props->no_promisc ? MACVLAN_FLAG_NOPROMISC : 0);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return do_add_link_with_lookup (platform,
	                                props->tap ? NM_LINK_TYPE_MACVTAP : NM_LINK_TYPE_MACVLAN,
	                                name, nlmsg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-11-11 18:41:48 +01:00
static int
link_sit_add (NMPlatform *platform,
              const char *name,
              const NMPlatformLnkSit *props,
              const NMPlatformLink **out_link)
{
	/* Create a SIT (IPv6-in-IPv4) tunnel interface @name from @props. */
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	struct nlattr *linkinfo;
	struct nlattr *infodata;
	char buffer[INET_ADDRSTRLEN];

	_LOGD (LOG_FMT_IP_TUNNEL,
	       "sit",
	       name,
	       props->parent_ifindex,
	       nm_utils_inet4_ntop (props->local, NULL),
	       nm_utils_inet4_ntop (props->remote, buffer));

	msg = _nl_msg_new_link (RTM_NEWLINK,
	                        NLM_F_CREATE | NLM_F_EXCL,
	                        0,
	                        name,
	                        0,
	                        0);
	if (!msg)
		return FALSE;

	linkinfo = nla_nest_start (msg, IFLA_LINKINFO);
	if (!linkinfo)
		goto nla_put_failure;
	NLA_PUT_STRING (msg, IFLA_INFO_KIND, "sit");

	infodata = nla_nest_start (msg, IFLA_INFO_DATA);
	if (!infodata)
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (msg, IFLA_IPTUN_LINK, props->parent_ifindex);
	NLA_PUT_U32 (msg, IFLA_IPTUN_LOCAL, props->local);
	NLA_PUT_U32 (msg, IFLA_IPTUN_REMOTE, props->remote);
	NLA_PUT_U8 (msg, IFLA_IPTUN_TTL, props->ttl);
	NLA_PUT_U8 (msg, IFLA_IPTUN_TOS, props->tos);
	NLA_PUT_U8 (msg, IFLA_IPTUN_PMTUDISC, !!props->path_mtu_discovery);

	nla_nest_end (msg, infodata);
	nla_nest_end (msg, linkinfo);

	return do_add_link_with_lookup (platform, NM_LINK_TYPE_SIT, name, msg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-10-14 10:01:48 +02:00
static gboolean
link_vxlan_add (NMPlatform *platform,
                const char *name,
                const NMPlatformLnkVxlan *props,
                const NMPlatformLink **out_link)
{
	/* Create a VXLAN interface @name configured from @props. IPv4 and
	 * IPv6 endpoints are mutually exclusive; the IPv4 address wins when
	 * both are set. */
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	struct nlattr *linkinfo;
	struct nlattr *infodata;
	struct nm_ifla_vxlan_port_range ports;

	g_return_val_if_fail (props, FALSE);

	_LOGD ("link: add vxlan '%s', parent %d, vxlan id %d",
	       name, props->parent_ifindex, props->id);

	msg = _nl_msg_new_link (RTM_NEWLINK,
	                        NLM_F_CREATE | NLM_F_EXCL,
	                        0,
	                        name,
	                        0,
	                        0);
	if (!msg)
		return FALSE;

	linkinfo = nla_nest_start (msg, IFLA_LINKINFO);
	if (!linkinfo)
		goto nla_put_failure;
	NLA_PUT_STRING (msg, IFLA_INFO_KIND, "vxlan");

	infodata = nla_nest_start (msg, IFLA_INFO_DATA);
	if (!infodata)
		goto nla_put_failure;

	NLA_PUT_U32 (msg, IFLA_VXLAN_ID, props->id);

	if (props->group)
		NLA_PUT (msg, IFLA_VXLAN_GROUP, sizeof (props->group), &props->group);
	else if (memcmp (&props->group6, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (msg, IFLA_VXLAN_GROUP6, sizeof (props->group6), &props->group6);

	if (props->local)
		NLA_PUT (msg, IFLA_VXLAN_LOCAL, sizeof (props->local), &props->local);
	else if (memcmp (&props->local6, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (msg, IFLA_VXLAN_LOCAL6, sizeof (props->local6), &props->local6);

	if (props->parent_ifindex >= 0)
		NLA_PUT_U32 (msg, IFLA_VXLAN_LINK, props->parent_ifindex);

	if (props->src_port_min || props->src_port_max) {
		/* Port range is transmitted in network byte order. */
		ports.low = htons (props->src_port_min);
		ports.high = htons (props->src_port_max);
		NLA_PUT (msg, IFLA_VXLAN_PORT_RANGE, sizeof (ports), &ports);
	}

	NLA_PUT_U16 (msg, IFLA_VXLAN_PORT, htons (props->dst_port));
	NLA_PUT_U8 (msg, IFLA_VXLAN_TOS, props->tos);
	NLA_PUT_U8 (msg, IFLA_VXLAN_TTL, props->ttl);
	NLA_PUT_U32 (msg, IFLA_VXLAN_AGEING, props->ageing);
	NLA_PUT_U32 (msg, IFLA_VXLAN_LIMIT, props->limit);
	NLA_PUT_U8 (msg, IFLA_VXLAN_LEARNING, !!props->learning);
	NLA_PUT_U8 (msg, IFLA_VXLAN_PROXY, !!props->proxy);
	NLA_PUT_U8 (msg, IFLA_VXLAN_RSC, !!props->rsc);
	NLA_PUT_U8 (msg, IFLA_VXLAN_L2MISS, !!props->l2miss);
	NLA_PUT_U8 (msg, IFLA_VXLAN_L3MISS, !!props->l3miss);

	nla_nest_end (msg, infodata);
	nla_nest_end (msg, linkinfo);

	return do_add_link_with_lookup (platform, NM_LINK_TYPE_VXLAN, name, msg, out_link);

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
2015-10-27 16:14:54 +01:00
static void
_vlan_change_vlan_qos_mapping_create (gboolean is_ingress_map,
                                      gboolean reset_all,
                                      const NMVlanQosMapping *current_map,
                                      guint current_n_map,
                                      const NMVlanQosMapping *set_map,
                                      guint set_n_map,
                                      NMVlanQosMapping **out_map,
                                      guint *out_n_map)
{
	/* Build the QoS mapping array to send to the kernel: optionally
	 * clear-entries for the current mappings (when @reset_all), followed
	 * by the explicitly requested @set_map entries. The result is sorted
	 * by "from" and de-duplicated, with later (requested) entries winning
	 * over earlier (clearing) ones. @out_map is g_free()'d by the caller. */
	const guint INGRESS_RANGE_LEN = 8;
	NMVlanQosMapping *merged;
	guint total, i, n_kept;

	nm_assert (out_map && !*out_map);
	nm_assert (out_n_map && !*out_n_map);

	if (!reset_all)
		current_n_map = 0;
	else if (is_ingress_map)
		current_n_map = INGRESS_RANGE_LEN;

	total = current_n_map + set_n_map;
	if (total == 0)
		return;

	merged = g_new (NMVlanQosMapping, total);

	if (current_n_map) {
		if (is_ingress_map) {
			/* For the ingress-map, there are only 8 entries (0 to 7).
			 * When the user requests to reset all entries, we don't actually
			 * need the cached entries, we can just explicitly clear all possible
			 * ones.
			 *
			 * That makes only a real difference in case our cache is out-of-date.
			 *
			 * For the egress map we cannot do that, because there are far too
			 * many. There we can only clear the entries that we know about. */
			for (i = 0; i < INGRESS_RANGE_LEN; i++) {
				merged[i].from = i;
				merged[i].to = 0;
			}
		} else {
			for (i = 0; i < current_n_map; i++) {
				merged[i].from = current_map[i].from;
				merged[i].to = 0;
			}
		}
	}

	if (set_n_map)
		memcpy (&merged[current_n_map], set_map, sizeof (*set_map) * set_n_map);

	/* Sort by "from" so duplicates become adjacent. */
	g_qsort_with_data (merged,
	                   total,
	                   sizeof (*merged),
	                   _vlan_qos_mapping_cmp_from,
	                   NULL);

	/* Compact in place: drop entries with an invalid priority and, for
	 * duplicate "from" keys, keep the last (i.e. the requested) entry. */
	n_kept = 0;
	for (i = 0; i < total; i++) {
		if (is_ingress_map) {
			if (!VLAN_XGRESS_PRIO_VALID (merged[i].from))
				continue;
		} else {
			if (!VLAN_XGRESS_PRIO_VALID (merged[i].to))
				continue;
		}
		if (   n_kept > 0
		    && merged[n_kept - 1].from == merged[i].from)
			merged[n_kept - 1] = merged[i];
		else
			merged[n_kept++] = merged[i];
	}

	*out_map = merged;
	*out_n_map = n_kept;
}
static gboolean
link_vlan_change (NMPlatform *platform,
                  int ifindex,
                  NMVlanFlags flags_mask,
                  NMVlanFlags flags_set,
                  gboolean ingress_reset_all,
                  const NMVlanQosMapping *ingress_map,
                  gsize n_ingress_map,
                  gboolean egress_reset_all,
                  const NMVlanQosMapping *egress_map,
                  gsize n_egress_map)
{
	/* Change flags and QoS mappings of the VLAN interface @ifindex.
	 * The request is computed against the cached link state, so the
	 * link must be present in the platform cache. */
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	const NMPObject *obj;
	const NMPObjectLnkVlan *lnk_vlan;
	gs_free NMVlanQosMapping *merged_ingress = NULL;
	gs_free NMVlanQosMapping *merged_egress = NULL;
	guint n_merged_ingress = 0;
	guint n_merged_egress = 0;
	char s_flags[64];
	char s_ingress[256];
	char s_egress[256];

	obj = nmp_cache_lookup_link (priv->cache, ifindex);
	if (   !obj
	    || !obj->_link.netlink.is_in_netlink) {
		_LOGD ("link: change %d: %s: link does not exist", ifindex, "vlan");
		return FALSE;
	}

	lnk_vlan = obj->_link.netlink.lnk ? &obj->_link.netlink.lnk->_lnk_vlan : NULL;

	flags_set &= flags_mask;

	/* Merge the requested mappings with the cached ones into the final
	 * arrays to send (see _vlan_change_vlan_qos_mapping_create()). */
	_vlan_change_vlan_qos_mapping_create (TRUE,
	                                      ingress_reset_all,
	                                      lnk_vlan ? lnk_vlan->ingress_qos_map : NULL,
	                                      lnk_vlan ? lnk_vlan->n_ingress_qos_map : 0,
	                                      ingress_map,
	                                      n_ingress_map,
	                                      &merged_ingress,
	                                      &n_merged_ingress);
	_vlan_change_vlan_qos_mapping_create (FALSE,
	                                      egress_reset_all,
	                                      lnk_vlan ? lnk_vlan->egress_qos_map : NULL,
	                                      lnk_vlan ? lnk_vlan->n_egress_qos_map : 0,
	                                      egress_map,
	                                      n_egress_map,
	                                      &merged_egress,
	                                      &n_merged_egress);

	_LOGD ("link: change %d: vlan:%s%s%s",
	       ifindex,
	       flags_mask
	           ? nm_sprintf_buf (s_flags, " flags 0x%x/0x%x", (unsigned) flags_set, (unsigned) flags_mask)
	           : "",
	       n_merged_ingress
	           ? nm_platform_vlan_qos_mapping_to_string (" ingress-qos-map",
	                                                     merged_ingress,
	                                                     n_merged_ingress,
	                                                     s_ingress,
	                                                     sizeof (s_ingress))
	           : "",
	       n_merged_egress
	           ? nm_platform_vlan_qos_mapping_to_string (" egress-qos-map",
	                                                     merged_egress,
	                                                     n_merged_egress,
	                                                     s_egress,
	                                                     sizeof (s_egress))
	           : "");

	msg = _nl_msg_new_link (RTM_NEWLINK,
	                        0,
	                        ifindex,
	                        NULL,
	                        0,
	                        0);
	if (   !msg
	    || !_nl_msg_new_link_set_linkinfo_vlan (msg,
	                                            -1,
	                                            flags_mask,
	                                            flags_set,
	                                            merged_ingress,
	                                            n_merged_ingress,
	                                            merged_egress,
	                                            n_merged_egress))
		g_return_val_if_reached (FALSE);

	return do_change_link (platform, ifindex, msg) == NM_PLATFORM_ERROR_SUCCESS;
}
2015-09-15 15:07:37 +02:00
static int
tun_add (NMPlatform *platform, const char *name, gboolean tap,
         gint64 owner, gint64 group, gboolean pi, gboolean vnet_hdr,
         gboolean multi_queue, const NMPlatformLink **out_link)
{
	/* Create a persistent tun or tap device @name via /dev/net/tun
	 * ioctls, then request and look up the resulting link in the cache.
	 * @owner/@group < 0 (or >= G_MAXINT32) mean "don't set".
	 * Returns TRUE if the device appeared in the cache.
	 *
	 * FIX: the previous version duplicated "close (fd); return FALSE;"
	 * on four separate error paths; consolidated into a single
	 * goto-based cleanup (CERT MEM12-C) with identical behavior. */
	const NMPObject *obj;
	struct ifreq ifr = { };
	int fd;

	_LOGD ("link: add %s '%s' owner %" G_GINT64_FORMAT " group %" G_GINT64_FORMAT,
	       tap ? "tap" : "tun", name, owner, group);

	fd = open ("/dev/net/tun", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return FALSE;

	nm_utils_ifname_cpy (ifr.ifr_name, name);
	ifr.ifr_flags = tap ? IFF_TAP : IFF_TUN;
	if (!pi)
		ifr.ifr_flags |= IFF_NO_PI;
	if (vnet_hdr)
		ifr.ifr_flags |= IFF_VNET_HDR;
	if (multi_queue)
		ifr.ifr_flags |= NM_IFF_MULTI_QUEUE;

	if (ioctl (fd, TUNSETIFF, &ifr))
		goto fail;

	if (owner >= 0 && owner < G_MAXINT32) {
		if (ioctl (fd, TUNSETOWNER, (uid_t) owner))
			goto fail;
	}

	if (group >= 0 && group < G_MAXINT32) {
		if (ioctl (fd, TUNSETGROUP, (gid_t) group))
			goto fail;
	}

	/* Make the device outlive this file descriptor. */
	if (ioctl (fd, TUNSETPERSIST, 1))
		goto fail;

	do_request_link (platform, 0, name);
	obj = nmp_cache_lookup_link_full (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache,
	                                  0, name, FALSE,
	                                  tap ? NM_LINK_TYPE_TAP : NM_LINK_TYPE_TUN,
	                                  NULL, NULL);
	if (out_link)
		*out_link = obj ? &obj->link : NULL;

	close (fd);
	return !!obj;

fail:
	close (fd);
	return FALSE;
}
2013-03-27 22:53:55 +01:00
static gboolean
link_enslave (NMPlatform *platform, int master, int slave)
{
	/* Attach @slave to @master by setting IFLA_MASTER. A @master of 0
	 * detaches the slave (see link_release()). */
	nm_auto_nlmsg struct nl_msg *msg = NULL;

	_LOGD ("link: change %d: enslave: master %d", slave, master);

	msg = _nl_msg_new_link (RTM_NEWLINK, 0, slave, NULL, 0, 0);
	if (!msg)
		return FALSE;

	NLA_PUT_U32 (msg, IFLA_MASTER, master);

	return do_change_link (platform, slave, msg) == NM_PLATFORM_ERROR_SUCCESS;

nla_put_failure:
	g_return_val_if_reached (FALSE);
}
static gboolean
link_release (NMPlatform *platform, int master, int slave)
{
	/* Releasing @slave is simply enslaving it to master 0; the @master
	 * argument is kept for interface symmetry but is not needed. */
	return link_enslave (platform, 0, slave);
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-09-04 11:50:41 +02:00
2013-06-10 16:21:08 -03:00
static gboolean
2016-04-20 12:06:43 +02:00
_infiniband_partition_action ( NMPlatform * platform ,
InfinibandAction action ,
int parent ,
int p_key ,
const NMPlatformLink * * out_link )
2013-06-10 16:21:08 -03:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it loses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting to make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2016-12-09 12:47:17 +01:00
nm_auto_close int dirfd = - 1 ;
char ifname_parent [ IFNAMSIZ ] ;
2016-04-20 12:06:43 +02:00
const NMPObject * obj ;
2016-04-20 11:50:45 +02:00
char id [ 20 ] ;
2016-04-20 12:06:43 +02:00
char name [ IFNAMSIZ ] ;
gboolean success ;
2013-06-10 16:21:08 -03:00
2016-04-20 12:06:43 +02:00
nm_assert ( NM_IN_SET ( action , INFINIBAND_ACTION_CREATE_CHILD , INFINIBAND_ACTION_DELETE_CHILD ) ) ;
2016-04-20 11:44:23 +02:00
nm_assert ( p_key > 0 & & p_key < = 0xffff & & p_key ! = 0x8000 ) ;
2016-12-09 12:47:17 +01:00
dirfd = nm_platform_sysctl_open_netdir ( platform , parent , ifname_parent ) ;
if ( dirfd < 0 ) {
2016-04-20 11:28:38 +02:00
errno = ENOENT ;
return FALSE ;
}
2013-06-10 16:21:08 -03:00
2016-04-20 11:50:45 +02:00
nm_sprintf_buf ( id , " 0x%04x " , p_key ) ;
2016-12-09 12:47:17 +01:00
if ( action = = INFINIBAND_ACTION_CREATE_CHILD )
success = nm_platform_sysctl_set ( platform , NMP_SYSCTL_PATHID_NETDIR ( dirfd , ifname_parent , " create_child " ) , id ) ;
else
success = nm_platform_sysctl_set ( platform , NMP_SYSCTL_PATHID_NETDIR ( dirfd , ifname_parent , " delete_child " ) , id ) ;
2016-04-20 12:06:43 +02:00
if ( ! success ) {
if ( action = = INFINIBAND_ACTION_DELETE_CHILD
& & errno = = ENODEV )
return TRUE ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return FALSE ;
2016-04-20 12:06:43 +02:00
}
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
2016-12-09 12:47:17 +01:00
nm_utils_new_infiniband_name ( name , ifname_parent , p_key ) ;
2016-04-20 11:28:38 +02:00
do_request_link ( platform , 0 , name ) ;
2013-06-10 16:21:08 -03:00
2016-04-20 12:06:43 +02:00
if ( action = = INFINIBAND_ACTION_DELETE_CHILD )
return TRUE ;
obj = nmp_cache_lookup_link_full ( priv - > cache , 0 , name , FALSE ,
NM_LINK_TYPE_INFINIBAND , NULL , NULL ) ;
2015-12-09 15:13:57 +01:00
if ( out_link )
* out_link = obj ? & obj - > link : NULL ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
return ! ! obj ;
2013-06-10 16:21:08 -03:00
}
2016-04-20 09:16:21 +02:00
static gboolean
2016-04-20 12:06:43 +02:00
infiniband_partition_add ( NMPlatform * platform , int parent , int p_key , const NMPlatformLink * * out_link )
2016-04-20 09:16:21 +02:00
{
2016-04-20 12:06:43 +02:00
return _infiniband_partition_action ( platform , INFINIBAND_ACTION_CREATE_CHILD , parent , p_key , out_link ) ;
}
2016-04-20 09:16:21 +02:00
2016-04-20 12:06:43 +02:00
static gboolean
infiniband_partition_delete (NMPlatform *platform, int parent, int p_key)
{
	/* Delete the IPoIB child of @parent identified by partition key @p_key. */
	return _infiniband_partition_action (platform,
	                                     INFINIBAND_ACTION_DELETE_CHILD,
	                                     parent,
	                                     p_key,
	                                     NULL);
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-09-04 11:50:41 +02:00
2014-02-04 14:27:03 +01:00
static WifiData *
wifi_get_wifi_data (NMPlatform *platform, int ifindex)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	const NMPlatformLink *pllink;
	WifiData *wifi_data;

	wifi_data = g_hash_table_lookup (priv->wifi_data, GINT_TO_POINTER (ifindex));
	pllink = nm_platform_link_get (platform, ifindex);

	/* @wifi_data caches the interface name used for WEXT queries. If the
	 * interface name changed (e.g. renamed by udev during initial setup),
	 * drop the stale entry and reinitialize from scratch below. */
	if (   wifi_data
	    && pllink
	    && !nm_streq (wifi_utils_get_iface (wifi_data), pllink->name)) {
		_LOGD ("wifi: interface %s renamed to %s, dropping old data for ifindex %d",
		       wifi_utils_get_iface (wifi_data),
		       pllink->name,
		       ifindex);
		g_hash_table_remove (priv->wifi_data, GINT_TO_POINTER (ifindex));
		wifi_data = NULL;
	}

	if (!wifi_data && pllink) {
		switch (pllink->type) {
		case NM_LINK_TYPE_WIFI:
			wifi_data = wifi_utils_init (pllink->name, ifindex, TRUE);
			break;
		case NM_LINK_TYPE_OLPC_MESH:
			/* The kernel driver now uses nl80211, but we force use of WEXT because
			 * the cfg80211 interactions are not quite ready to support access to
			 * mesh control through nl80211 just yet. */
#if HAVE_WEXT
			wifi_data = wifi_wext_init (pllink->name, ifindex, FALSE);
#endif
			break;
		default:
			break;
		}

		if (wifi_data)
			g_hash_table_insert (priv->wifi_data, GINT_TO_POINTER (ifindex), wifi_data);
	}

	return wifi_data;
}
2016-02-19 01:06:28 +01:00
/* Shared prologue for the wifi_*() / mesh_*() accessors below: pushes the
 * platform's network namespace and looks up the WifiData for @ifindex.
 * It expands to declarations of the locals @netns and @wifi_data plus early
 * returns of @retval (possibly empty, for void functions) from the enclosing
 * function when the namespace push or the wifi-data lookup fails. */
# define WIFI_GET_WIFI_DATA_NETNS(wifi_data, platform, ifindex, retval) \
nm_auto_pop_netns NMPNetns * netns = NULL ; \
WifiData * wifi_data ; \
if ( ! nm_platform_netns_push ( platform , & netns ) ) \
return retval ; \
wifi_data = wifi_get_wifi_data ( platform , ifindex ) ; \
if ( ! wifi_data ) \
return retval ;
2014-02-04 14:27:03 +01:00
static gboolean
wifi_get_capabilities (NMPlatform *platform, int ifindex, NMDeviceWifiCapabilities *caps)
{
	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);

	/* @caps is optional; callers may pass NULL to merely probe whether
	 * wifi data is available for @ifindex. */
	NM_SET_OUT (caps, wifi_utils_get_caps (wifi_data));
	return TRUE;
}
static gboolean
2014-07-07 12:04:14 -04:00
wifi_get_bssid ( NMPlatform * platform , int ifindex , guint8 * bssid )
2014-02-04 14:27:03 +01:00
{
2016-02-19 01:06:28 +01:00
WIFI_GET_WIFI_DATA_NETNS ( wifi_data , platform , ifindex , FALSE ) ;
2014-02-04 14:27:03 +01:00
return wifi_utils_get_bssid ( wifi_data , bssid ) ;
}
static guint32
wifi_get_frequency (NMPlatform *platform, int ifindex)
{
	guint32 freq;

	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, 0);

	/* Current operating frequency as reported by the wifi helper (0 on failure). */
	freq = wifi_utils_get_freq (wifi_data);
	return freq;
}
static gboolean
wifi_get_quality (NMPlatform *platform, int ifindex)
{
	gboolean qual;

	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);

	/* NOTE(review): the return type is gboolean but the helper presumably
	 * yields a quality value -- verify against wifi_utils_get_qual() and the
	 * NMPlatform vtable before relying on this as a boolean. */
	qual = wifi_utils_get_qual (wifi_data);
	return qual;
}
static guint32
wifi_get_rate (NMPlatform *platform, int ifindex)
{
	/* Fix (consistency): use 0 rather than FALSE as the fallback -- the
	 * return type is guint32, and the sibling getters returning guint32
	 * (wifi_get_frequency(), wifi_find_frequency(), mesh_get_channel())
	 * all use 0. Behavior is unchanged since FALSE == 0. */
	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, 0);

	/* Current bitrate as reported by the wifi helper. */
	return wifi_utils_get_rate (wifi_data);
}
static NM80211Mode
wifi_get_mode (NMPlatform *platform, int ifindex)
{
	NM80211Mode mode;

	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, NM_802_11_MODE_UNKNOWN);

	/* Current 802.11 operating mode; UNKNOWN when the device is unavailable. */
	mode = wifi_utils_get_mode (wifi_data);
	return mode;
}
static void
wifi_set_mode (NMPlatform *platform, int ifindex, NM80211Mode mode)
{
	/* Silently a no-op when the netns push or the wifi-data lookup fails. */
	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, );

	wifi_utils_set_mode (wifi_data, mode);
}
2014-10-23 14:19:59 -04:00
static void
wifi_set_powersave (NMPlatform *platform, int ifindex, guint32 powersave)
{
	/* Silently a no-op when the netns push or the wifi-data lookup fails. */
	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, );

	wifi_utils_set_powersave (wifi_data, powersave);
}
2014-02-04 14:27:03 +01:00
static guint32
wifi_find_frequency (NMPlatform *platform, int ifindex, const guint32 *freqs)
{
	guint32 freq;

	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, 0);

	/* Pick a usable frequency out of @freqs; 0 means none was found. */
	freq = wifi_utils_find_freq (wifi_data, freqs);
	return freq;
}
static void
wifi_indicate_addressing_running (NMPlatform *platform, int ifindex, gboolean running)
{
	/* Silently a no-op when the netns push or the wifi-data lookup fails. */
	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, );

	wifi_utils_indicate_addressing_running (wifi_data, running);
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2014-02-04 14:27:03 +01:00
2016-03-21 15:22:10 +01:00
static gboolean
link_can_assume (NMPlatform *platform, int ifindex)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	NMPCacheId cache_id;
	const NMPlatformObject *const *objs;
	const NMPObject *link;
	guint len, i;

	/* Decide whether the existing link @ifindex looks "configured" enough
	 * to be assumed: it must be in the cache, administratively up, and
	 * either enslaved to a master or carrying addresses. */
	if (ifindex <= 0)
		return FALSE;

	link = cache_lookup_link (platform, ifindex);
	if (   !link
	    || !NM_FLAGS_HAS (link->link.n_ifi_flags, IFF_UP))
		return FALSE;

	/* an enslaved link counts as configured. */
	if (link->link.master > 0)
		return TRUE;

	/* any visible IPv4 address counts. */
	if (nmp_cache_lookup_multi (priv->cache,
	                            nmp_cache_id_init_addrroute_visible_by_ifindex (&cache_id, NMP_OBJECT_TYPE_IP4_ADDRESS, ifindex),
	                            NULL))
		return TRUE;

	/* for IPv6, only non-link-local addresses count. */
	objs = nmp_cache_lookup_multi (priv->cache,
	                               nmp_cache_id_init_addrroute_visible_by_ifindex (&cache_id, NMP_OBJECT_TYPE_IP6_ADDRESS, ifindex),
	                               &len);
	if (objs) {
		for (i = 0; i < len; i++) {
			const NMPlatformIP6Address *a = (NMPlatformIP6Address *) objs[i];

			if (!IN6_IS_ADDR_LINKLOCAL (&a->address))
				return TRUE;
		}
	}

	return FALSE;
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2016-03-21 15:22:10 +01:00
2014-02-04 14:27:03 +01:00
static guint32
mesh_get_channel (NMPlatform *platform, int ifindex)
{
	guint32 channel;

	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, 0);

	/* Current OLPC mesh channel; 0 when unavailable. */
	channel = wifi_utils_get_mesh_channel (wifi_data);
	return channel;
}
static gboolean
mesh_set_channel (NMPlatform *platform, int ifindex, guint32 channel)
{
	gboolean success;

	WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);

	/* Tune the OLPC mesh interface to @channel. */
	success = wifi_utils_set_mesh_channel (wifi_data, channel);
	return success;
}
static gboolean
2014-06-26 10:42:11 -04:00
mesh_set_ssid ( NMPlatform * platform , int ifindex , const guint8 * ssid , gsize len )
2014-02-04 14:27:03 +01:00
{
2016-02-19 01:06:28 +01:00
WIFI_GET_WIFI_DATA_NETNS ( wifi_data , platform , ifindex , FALSE ) ;
2014-06-26 10:42:11 -04:00
return wifi_utils_set_mesh_ssid ( wifi_data , ssid , len ) ;
2014-02-04 14:27:03 +01:00
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2015-09-04 11:50:41 +02:00
2014-02-05 11:56:44 +01:00
static gboolean
link_get_wake_on_lan (NMPlatform *platform, int ifindex)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;
	NMLinkType type = nm_platform_link_get_type (platform, ifindex);

	if (!nm_platform_netns_push (platform, &netns))
		return FALSE;

	/* Wake-on-LAN is queried differently per link type: via ethtool for
	 * ethernet, via the wifi helper (WoWLAN) for wifi; everything else is
	 * reported as disabled. */
	switch (type) {
	case NM_LINK_TYPE_ETHERNET:
		return nmp_utils_ethtool_get_wake_on_lan (ifindex);
	case NM_LINK_TYPE_WIFI: {
		WifiData *wifi_data = wifi_get_wifi_data (platform, ifindex);

		return wifi_data && wifi_utils_get_wowlan (wifi_data);
	}
	default:
		return FALSE;
	}
}
2014-10-03 13:41:49 -05:00
static gboolean
link_get_driver_info (NMPlatform *platform,
                      int ifindex,
                      char **out_driver_name,
                      char **out_driver_version,
                      char **out_fw_version)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;
	NMPUtilsEthtoolDriverInfo driver_info;

	/* Query driver name/version/firmware via ethtool. Each out-argument is
	 * optional; on success it receives a newly allocated string which the
	 * caller owns. */
	if (!nm_platform_netns_push (platform, &netns))
		return FALSE;

	if (!nmp_utils_ethtool_get_driver_info (ifindex, &driver_info))
		return FALSE;

	NM_SET_OUT (out_driver_name,    g_strdup (driver_info.driver));
	NM_SET_OUT (out_driver_version, g_strdup (driver_info.version));
	NM_SET_OUT (out_fw_version,     g_strdup (driver_info.fw_version));
	return TRUE;
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2013-03-27 22:23:24 +01:00
2015-04-06 18:29:36 +02:00
static GArray *
2015-06-19 16:24:18 +02:00
ipx_address_get_all ( NMPlatform * platform , int ifindex , NMPObjectType obj_type )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2016-12-01 10:56:09 +01:00
NMPCacheId cache_id ;
2015-06-18 11:44:36 +02:00
2015-06-19 16:24:18 +02:00
nm_assert ( NM_IN_SET ( obj_type , NMP_OBJECT_TYPE_IP4_ADDRESS , NMP_OBJECT_TYPE_IP6_ADDRESS ) ) ;
2015-04-06 18:29:36 +02:00
return nmp_cache_lookup_multi_to_array ( priv - > cache ,
obj_type ,
2016-12-01 10:56:09 +01:00
nmp_cache_id_init_addrroute_visible_by_ifindex ( & cache_id ,
2015-06-18 11:44:36 +02:00
obj_type ,
ifindex ) ) ;
2015-04-06 18:29:36 +02:00
}
2013-03-27 22:23:24 +01:00
static GArray *
ip4_address_get_all (NMPlatform *platform, int ifindex)
{
	/* All visible IPv4 addresses on @ifindex, from the platform cache. */
	return ipx_address_get_all (platform, ifindex, NMP_OBJECT_TYPE_IP4_ADDRESS);
}
static GArray *
ip6_address_get_all (NMPlatform *platform, int ifindex)
{
	/* All visible IPv6 addresses on @ifindex, from the platform cache. */
	return ipx_address_get_all (platform, ifindex, NMP_OBJECT_TYPE_IP6_ADDRESS);
}
static gboolean
2013-12-02 10:20:26 -05:00
ip4_address_add ( NMPlatform * platform ,
int ifindex ,
in_addr_t addr ,
2016-04-06 18:04:26 +02:00
guint8 plen ,
2015-10-10 19:58:59 +02:00
in_addr_t peer_addr ,
2013-12-02 10:20:26 -05:00
guint32 lifetime ,
2014-02-19 16:10:59 -05:00
guint32 preferred ,
2016-02-29 17:06:21 +01:00
guint32 flags ,
2014-02-19 16:10:59 -05:00
const char * label )
2013-03-27 22:23:24 +01:00
{
2015-10-26 09:19:15 +01:00
NMPObject obj_id ;
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
nlmsg = _nl_msg_new_address ( RTM_NEWADDR ,
NLM_F_CREATE | NLM_F_REPLACE ,
AF_INET ,
ifindex ,
& addr ,
plen ,
& peer_addr ,
2016-02-29 17:06:21 +01:00
flags ,
2016-03-07 11:45:44 +01:00
nm_utils_ip4_address_is_link_local ( addr ) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE ,
2015-10-20 09:27:16 +02:00
lifetime ,
preferred ,
label ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
2015-10-20 09:27:16 +02:00
nmp_object_stackinit_id_ip4_address ( & obj_id , ifindex , addr , plen , peer_addr ) ;
return do_add_addrroute ( platform , & obj_id , nlmsg ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2013-12-02 10:20:26 -05:00
ip6_address_add ( NMPlatform * platform ,
int ifindex ,
struct in6_addr addr ,
2016-04-06 18:04:26 +02:00
guint8 plen ,
2015-10-10 19:58:59 +02:00
struct in6_addr peer_addr ,
2013-12-02 10:20:26 -05:00
guint32 lifetime ,
guint32 preferred ,
2016-02-29 15:36:12 +01:00
guint32 flags )
2013-03-27 22:23:24 +01:00
{
2015-10-26 09:19:15 +01:00
NMPObject obj_id ;
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
nlmsg = _nl_msg_new_address ( RTM_NEWADDR ,
NLM_F_CREATE | NLM_F_REPLACE ,
AF_INET6 ,
ifindex ,
& addr ,
plen ,
& peer_addr ,
flags ,
RT_SCOPE_UNIVERSE ,
lifetime ,
preferred ,
NULL ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
2015-10-20 09:27:16 +02:00
nmp_object_stackinit_id_ip6_address ( & obj_id , ifindex , & addr , plen ) ;
return do_add_addrroute ( platform , & obj_id , nlmsg ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2016-04-06 18:04:26 +02:00
ip4_address_delete ( NMPlatform * platform , int ifindex , in_addr_t addr , guint8 plen , in_addr_t peer_address )
2013-03-27 22:23:24 +01:00
{
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
2015-10-26 09:19:15 +01:00
NMPObject obj_id ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
2015-10-20 09:27:16 +02:00
nlmsg = _nl_msg_new_address ( RTM_DELADDR ,
0 ,
AF_INET ,
ifindex ,
& addr ,
plen ,
& peer_address ,
0 ,
RT_SCOPE_NOWHERE ,
NM_PLATFORM_LIFETIME_PERMANENT ,
NM_PLATFORM_LIFETIME_PERMANENT ,
NULL ) ;
2015-12-15 13:25:53 +01:00
if ( ! nlmsg )
g_return_val_if_reached ( FALSE ) ;
2015-10-20 09:27:16 +02:00
2015-10-26 09:19:15 +01:00
nmp_object_stackinit_id_ip4_address ( & obj_id , ifindex , addr , plen , peer_address ) ;
2015-10-20 09:27:16 +02:00
return do_delete_object ( platform , & obj_id , nlmsg ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2016-04-06 18:04:26 +02:00
ip6_address_delete ( NMPlatform * platform , int ifindex , struct in6_addr addr , guint8 plen )
2013-03-27 22:23:24 +01:00
{
2015-10-20 09:27:16 +02:00
nm_auto_nlmsg struct nl_msg * nlmsg = NULL ;
2015-10-26 09:19:15 +01:00
NMPObject obj_id ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
2015-10-20 09:27:16 +02:00
nlmsg = _nl_msg_new_address ( RTM_DELADDR ,
0 ,
AF_INET6 ,
ifindex ,
& addr ,
plen ,
NULL ,
0 ,
RT_SCOPE_NOWHERE ,
NM_PLATFORM_LIFETIME_PERMANENT ,
NM_PLATFORM_LIFETIME_PERMANENT ,
NULL ) ;
2015-12-15 13:25:53 +01:00
if ( ! nlmsg )
g_return_val_if_reached ( FALSE ) ;
2015-10-20 09:27:16 +02:00
2015-10-26 09:19:15 +01:00
nmp_object_stackinit_id_ip6_address ( & obj_id , ifindex , & addr , plen ) ;
2015-10-20 09:27:16 +02:00
return do_delete_object ( platform , & obj_id , nlmsg ) ;
2013-03-27 22:23:24 +01:00
}
2015-07-14 12:37:58 +02:00
static const NMPlatformIP4Address *
ip4_address_get (NMPlatform *platform, int ifindex, in_addr_t addr, guint8 plen, in_addr_t peer_address)
{
	const NMPObject *obj;
	NMPObject needle;

	/* Build a stack-allocated ID object and use it as the key for an O(1)
	 * lookup in the platform cache of native NMPlatform objects. */
	nmp_object_stackinit_id_ip4_address (&needle, ifindex, addr, plen, peer_address);
	obj = nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, &needle);

	/* Only hand out addresses that are visible (alive and not hidden). */
	if (!nmp_object_is_visible (obj))
		return NULL;
	return &obj->ip4_address;
}
2015-07-14 12:37:58 +02:00
static const NMPlatformIP6Address *
ip6_address_get (NMPlatform *platform, int ifindex, struct in6_addr addr, guint8 plen)
{
	const NMPObject *obj;
	NMPObject needle;

	/* Stack-initialize an ID object and look it up in the platform cache;
	 * IPv6 addresses are keyed by (ifindex, address, prefix length). */
	nmp_object_stackinit_id_ip6_address (&needle, ifindex, &addr, plen);
	obj = nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, &needle);

	/* Only hand out addresses that are visible (alive and not hidden). */
	if (!nmp_object_is_visible (obj))
		return NULL;
	return &obj->ip6_address;
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2013-03-27 22:23:24 +01:00
2015-04-06 18:29:36 +02:00
static GArray *
ipx_route_get_all (NMPlatform *platform, int ifindex, NMPObjectType obj_type, NMPlatformGetRouteFlags flags)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	const NMPlatformIPRoute *const *routes;
	const NMPClass *klass;
	NMPCacheId cache_id;
	GArray *result;
	gboolean include_rtprot_kernel;
	guint i, n_routes;

	nm_assert (NM_IN_SET (obj_type, NMP_OBJECT_TYPE_IP4_ROUTE, NMP_OBJECT_TYPE_IP6_ROUTE));

	/* When the caller requests neither default nor non-default routes,
	 * interpret that as "give me both". */
	if (!NM_FLAGS_ANY (flags, NM_PLATFORM_GET_ROUTE_FLAGS_WITH_DEFAULT | NM_PLATFORM_GET_ROUTE_FLAGS_WITH_NON_DEFAULT))
		flags |= NM_PLATFORM_GET_ROUTE_FLAGS_WITH_DEFAULT | NM_PLATFORM_GET_ROUTE_FLAGS_WITH_NON_DEFAULT;

	klass = nmp_class_from_type (obj_type);

	/* Fetch the matching visible routes from the cache in one multi-lookup. */
	nmp_cache_id_init_routes_visible (&cache_id,
	                                  obj_type,
	                                  NM_FLAGS_HAS (flags, NM_PLATFORM_GET_ROUTE_FLAGS_WITH_DEFAULT),
	                                  NM_FLAGS_HAS (flags, NM_PLATFORM_GET_ROUTE_FLAGS_WITH_NON_DEFAULT),
	                                  ifindex);
	routes = (const NMPlatformIPRoute *const *) nmp_cache_lookup_multi (priv->cache, &cache_id, &n_routes);

	/* Copy the public part of each route into a caller-owned GArray,
	 * optionally filtering out kernel-originated (rtprot kernel) routes. */
	result = g_array_sized_new (FALSE, FALSE, klass->sizeof_public, n_routes);
	include_rtprot_kernel = NM_FLAGS_HAS (flags, NM_PLATFORM_GET_ROUTE_FLAGS_WITH_RTPROT_KERNEL);
	for (i = 0; i < n_routes; i++) {
		nm_assert (NMP_OBJECT_GET_CLASS (NMP_OBJECT_UP_CAST (routes[i])) == klass);
		if (   include_rtprot_kernel
		    || routes[i]->rt_source != NM_IP_CONFIG_SOURCE_RTPROT_KERNEL)
			g_array_append_vals (result, routes[i], 1);
	}
	return result;
}
2013-03-27 22:23:24 +01:00
static GArray *
2015-06-22 17:28:37 +02:00
ip4_route_get_all ( NMPlatform * platform , int ifindex , NMPlatformGetRouteFlags flags )
2013-03-27 22:23:24 +01:00
{
2015-06-22 17:28:37 +02:00
return ipx_route_get_all ( platform , ifindex , NMP_OBJECT_TYPE_IP4_ROUTE , flags ) ;
2013-03-27 22:23:24 +01:00
}
static GArray *
2015-06-22 17:28:37 +02:00
ip6_route_get_all ( NMPlatform * platform , int ifindex , NMPlatformGetRouteFlags flags )
2013-03-27 22:23:24 +01:00
{
2015-06-22 17:28:37 +02:00
return ipx_route_get_all ( platform , ifindex , NMP_OBJECT_TYPE_IP6_ROUTE , flags ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
ip4_route_add (NMPlatform *platform, int ifindex, NMIPConfigSource source,
               in_addr_t network, guint8 plen, in_addr_t gateway,
               in_addr_t pref_src, guint32 metric, guint32 mss)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	NMPObject route_id;

	/* Compose the RTM_NEWROUTE request. A non-zero gateway means the route
	 * has universe scope; otherwise it is a directly-connected (link) route. */
	nlmsg = _nl_msg_new_route (RTM_NEWROUTE,
	                           NLM_F_CREATE | NLM_F_REPLACE,
	                           AF_INET,
	                           ifindex,
	                           source,
	                           gateway ? RT_SCOPE_UNIVERSE : RT_SCOPE_LINK,
	                           &network,
	                           plen,
	                           &gateway,
	                           metric,
	                           mss,
	                           pref_src ? &pref_src : NULL);

	/* Send the request and wait until the change shows up in the cache. */
	nmp_object_stackinit_id_ip4_route (&route_id, ifindex, network, plen, metric);
	return do_add_addrroute (platform, &route_id, nlmsg);
}
static gboolean
ip6_route_add (NMPlatform *platform, int ifindex, NMIPConfigSource source,
               struct in6_addr network, guint8 plen, struct in6_addr gateway,
               guint32 metric, guint32 mss)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	NMPObject route_id;

	/* Compose the RTM_NEWROUTE request. An unspecified (all-zero) gateway
	 * means a directly-connected route with link scope. */
	nlmsg = _nl_msg_new_route (RTM_NEWROUTE,
	                           NLM_F_CREATE | NLM_F_REPLACE,
	                           AF_INET6,
	                           ifindex,
	                           source,
	                           !IN6_IS_ADDR_UNSPECIFIED (&gateway) ? RT_SCOPE_UNIVERSE : RT_SCOPE_LINK,
	                           &network,
	                           plen,
	                           &gateway,
	                           metric,
	                           mss,
	                           NULL);

	/* Send the request and wait until the change shows up in the cache. */
	nmp_object_stackinit_id_ip6_route (&route_id, ifindex, &network, plen, metric);
	return do_add_addrroute (platform, &route_id, nlmsg);
}
static gboolean
ip4_route_delete (NMPlatform *platform, int ifindex, in_addr_t network, guint8 plen, guint32 metric)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	NMPObject obj_id;

	nmp_object_stackinit_id_ip4_route (&obj_id, ifindex, network, plen, metric);

	if (metric == 0) {
		/* Deleting an IPv4 route with metric 0 does not only delete an exactly
		 * matching route: if no metric-0 route exists, the kernel may delete
		 * another route to the same destination instead. That is not the
		 * semantic nm_platform_ip4_route_delete() wants.
		 *
		 * So first make sure we have the most recent state by processing all
		 * delayed actions (including re-reading data from netlink). */
		delayed_action_handle_all (platform, TRUE);

		if (!nmp_cache_lookup_obj (priv->cache, &obj_id)) {
			/* We are about to delete an IPv4 route with metric 0 but, even after
			 * refreshing the platform cache above, no such route is known.
			 *
			 * Be extra careful and reload the routes. We must be sure such a
			 * route doesn't exist, because right after adding an IPv4 address we
			 * try to delete the kernel-added device route with metric 0, and the
			 * notification about that route might not have arrived yet.
			 *
			 * FIXME: once ip4_address_add() guarantees that upon return the
			 * platform cache holds the latest state, this additional expensive
			 * cache-resync can be dropped. */
			do_request_one_type (platform, NMP_OBJECT_TYPE_IP4_ROUTE);

			if (!nmp_cache_lookup_obj (priv->cache, &obj_id))
				return TRUE;  /* route provably absent: nothing to delete */
		}
	}

	/* Compose and send the RTM_DELROUTE request. */
	nlmsg = _nl_msg_new_route (RTM_DELROUTE,
	                           0,
	                           AF_INET,
	                           ifindex,
	                           NM_IP_CONFIG_SOURCE_UNKNOWN,
	                           RT_SCOPE_NOWHERE,
	                           &network,
	                           plen,
	                           NULL,
	                           metric,
	                           0,
	                           NULL);
	if (!nlmsg)
		return FALSE;

	return do_delete_object (platform, &obj_id, nlmsg);
}
static gboolean
ip6_route_delete (NMPlatform *platform, int ifindex, struct in6_addr network, guint8 plen, guint32 metric)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	NMPObject obj_id;

	/* The cache stores IPv6 routes with the normalized metric; normalize
	 * first so both the netlink request and the cache key agree. */
	metric = nm_utils_ip6_route_metric_normalize (metric);

	/* Compose the RTM_DELROUTE request. */
	nlmsg = _nl_msg_new_route (RTM_DELROUTE,
	                           0,
	                           AF_INET6,
	                           ifindex,
	                           NM_IP_CONFIG_SOURCE_UNKNOWN,
	                           RT_SCOPE_NOWHERE,
	                           &network,
	                           plen,
	                           NULL,
	                           metric,
	                           0,
	                           NULL);
	if (!nlmsg)
		return FALSE;

	nmp_object_stackinit_id_ip6_route (&obj_id, ifindex, &network, plen, metric);
	return do_delete_object (platform, &obj_id, nlmsg);
}
2015-07-14 12:37:58 +02:00
static const NMPlatformIP4Route *
2016-04-06 14:19:05 +02:00
ip4_route_get ( NMPlatform * platform , int ifindex , in_addr_t network , guint8 plen , guint32 metric )
2013-03-27 22:23:24 +01:00
{
2015-10-26 09:19:15 +01:00
NMPObject obj_id ;
2015-07-14 12:37:58 +02:00
const NMPObject * obj ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
2015-10-26 09:19:15 +01:00
nmp_object_stackinit_id_ip4_route ( & obj_id , ifindex , network , plen , metric ) ;
obj = nmp_cache_lookup_obj ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache , & obj_id ) ;
2015-07-14 12:37:58 +02:00
if ( nmp_object_is_visible ( obj ) )
return & obj - > ip4_route ;
return NULL ;
2013-03-27 22:23:24 +01:00
}
2015-07-14 12:37:58 +02:00
static const NMPlatformIP6Route *
2016-04-06 14:19:05 +02:00
ip6_route_get ( NMPlatform * platform , int ifindex , struct in6_addr network , guint8 plen , guint32 metric )
2013-03-27 22:23:24 +01:00
{
2015-10-26 09:19:15 +01:00
NMPObject obj_id ;
2015-07-14 12:37:58 +02:00
const NMPObject * obj ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
2014-12-22 17:06:13 +01:00
metric = nm_utils_ip6_route_metric_normalize ( metric ) ;
2015-10-26 09:19:15 +01:00
nmp_object_stackinit_id_ip6_route ( & obj_id , ifindex , & network , plen , metric ) ;
obj = nmp_cache_lookup_obj ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache , & obj_id ) ;
2015-07-14 12:37:58 +02:00
if ( nmp_object_is_visible ( obj ) )
return & obj - > ip6_route ;
return NULL ;
2013-03-27 22:23:24 +01:00
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2013-03-27 22:23:24 +01:00
2013-03-27 22:23:24 +01:00
# define EVENT_CONDITIONS ((GIOCondition) (G_IO_IN | G_IO_PRI))
# define ERROR_CONDITIONS ((GIOCondition) (G_IO_ERR | G_IO_NVAL))
# define DISCONNECT_CONDITIONS ((GIOCondition) (G_IO_HUP))
static gboolean
event_handler ( GIOChannel * channel ,
2014-03-04 18:07:05 -05:00
GIOCondition io_condition ,
gpointer user_data )
2013-03-27 22:23:24 +01:00
{
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_all ( NM_PLATFORM ( user_data ) , TRUE ) ;
return TRUE ;
}
2015-12-11 19:25:00 +01:00
/*****************************************************************************/
/* copied from libnl3's recvmsgs() */
/*
 * Read and process all messages currently pending on the priv->nlh netlink
 * socket.
 *
 * @platform: the platform instance.
 * @handle_events: if TRUE, valid messages are fed into the platform cache via
 *   event_valid_msg(). If FALSE, messages are drained and only their sequence
 *   numbers are tracked (see the "stop:" label below).
 *
 * Returns: 0 on success, a negative NLE error code on failure. Special cases:
 *   -NLE_AGAIN when the (non-blocking) socket has no more data,
 *   -NLE_DUMP_INTR when an interrupted dump was detected,
 *   and the private codes -_NLE_MSG_TRUNC / -_NLE_NM_NOBUFS, which callers
 *   use to trigger a full cache resync.
 */
static int
event_handler_recvmsgs (NMPlatform *platform, gboolean handle_events)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	struct nl_sock *sk = priv->nlh;
	int n, err = 0, multipart = 0, interrupted = 0;
	struct nlmsghdr *hdr;
	WaitForNlResponseResult seq_result;

	/*
	nla is passed on to not only to nl_recv() but may also be passed
	to a function pointer provided by the caller which may or may not
	initialize the variable. Thomas Graf.
	*/
	struct sockaddr_nl nla = {0};
	nm_auto_free struct ucred *creds = NULL;
	nm_auto_free unsigned char *buf = NULL;

continue_reading:
	/* free buffers from a previous iteration before nl_recv() allocates new ones. */
	g_clear_pointer (&buf, free);
	g_clear_pointer (&creds, free);

	/* clear errno: the switch below inspects it to disambiguate nl_recv() results. */
	errno = 0;
	n = nl_recv (sk, &nla, &buf, &creds);

	if (n <= 0) {
		/* workaround libnl3 <= 3.2.15 returning danling pointers in case nl_recv()
		 * fails. Fixed by libnl3 69468517d0de1675d80f24661ff57a5dbac7275c. */
		buf = NULL;
		creds = NULL;
	}

	switch (n) {
	case 0:
		/* Work around a libnl bug fixed in 3.2.22 (375a6294) */
		if (errno == EAGAIN) {
			/* EAGAIN is equal to EWOULDBLOCK. If it would not be, we'd have to
			 * workaround libnl3 mapping EWOULDBLOCK to -NLE_FAILURE. */
			G_STATIC_ASSERT (EAGAIN == EWOULDBLOCK);
			n = -NLE_AGAIN;
		}
		break;
	case -NLE_MSG_TRUNC: {
		int buf_size;

		/* the message receive buffer was too small. We lost one message, which
		 * is unfortunate. Try to double the buffer size for the next time. */
		buf_size = nl_socket_get_msg_buf_size (sk);
		if (buf_size < 512*1024) {
			buf_size *= 2;
			_LOGT ("netlink: recvmsg: increase message buffer size for recvmsg() to %d bytes", buf_size);
			if (nl_socket_set_msg_buf_size (sk, buf_size) < 0)
				nm_assert_not_reached ();
			if (!handle_events)
				goto continue_reading;
		}
		/* map to our private code so the caller knows a message was lost. */
		n = -_NLE_MSG_TRUNC;
		break;
	}
	case -NLE_NOMEM:
		if (errno == ENOBUFS) {
			/* we are very much interested in a overrun of the receive buffer.
			 * nl_recv() maps all kinds of errors to NLE_NOMEM, so check also
			 * for errno explicitly. And if so, hack our own return code to signal
			 * the overrun. */
			n = -_NLE_NM_NOBUFS;
		}
		break;
	}

	if (n <= 0)
		return n;

	/* iterate over all netlink messages contained in the received buffer. */
	hdr = (struct nlmsghdr *) buf;
	while (nlmsg_ok (hdr, n)) {
		nm_auto_nlmsg struct nl_msg *msg = NULL;
		gboolean abort_parsing = FALSE;
		gboolean process_valid_msg = FALSE;
		guint32 seq_number;

		msg = nlmsg_convert (hdr);
		if (!msg) {
			err = -NLE_NOMEM;
			goto out;
		}

		nlmsg_set_proto (msg, NETLINK_ROUTE);
		nlmsg_set_src (msg, &nla);

		/* only accept messages that verifiably originate from the kernel (pid 0). */
		if (!creds || creds->pid) {
			if (creds)
				_LOGT ("netlink: recvmsg: received non-kernel message (pid %d)", creds->pid);
			else
				_LOGT ("netlink: recvmsg: received message without credentials");
			err = 0;
			goto stop;
		}

		_LOGt ("netlink: recvmsg: new message type %d, seq %u",
		       hdr->nlmsg_type, hdr->nlmsg_seq);

		if (creds)
			nlmsg_set_creds (msg, creds);

		if (hdr->nlmsg_flags & NLM_F_MULTI)
			multipart = 1;

		if (hdr->nlmsg_flags & NLM_F_DUMP_INTR) {
			/*
			 * We have to continue reading to clear
			 * all messages until a NLMSG_DONE is
			 * received and report the inconsistency.
			 */
			interrupted = 1;
		}

		/* Other side wishes to see an ack for this message */
		if (hdr->nlmsg_flags & NLM_F_ACK) {
			/* FIXME: implement */
		}

		seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN;

		if (hdr->nlmsg_type == NLMSG_DONE) {
			/* messages terminates a multipart message, this is
			 * usually the end of a message and therefore we slip
			 * out of the loop by default. the user may overrule
			 * this action by skipping this packet. */
			multipart = 0;
			seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK;
		} else if (hdr->nlmsg_type == NLMSG_NOOP) {
			/* Message to be ignored, the default action is to
			 * skip this message if no callback is specified. The
			 * user may overrule this action by returning
			 * NL_PROCEED. */
		} else if (hdr->nlmsg_type == NLMSG_OVERRUN) {
			/* Data got lost, report back to user. The default action is to
			 * quit parsing. The user may overrule this action by retuning
			 * NL_SKIP or NL_PROCEED (dangerous) */
			err = -NLE_MSG_OVERFLOW;
			abort_parsing = TRUE;
		} else if (hdr->nlmsg_type == NLMSG_ERROR) {
			/* Message carries a nlmsgerr */
			struct nlmsgerr *e = nlmsg_data (hdr);

			if (hdr->nlmsg_len < nlmsg_size (sizeof (*e))) {
				/* Truncated error message, the default action
				 * is to stop parsing. The user may overrule
				 * this action by returning NL_SKIP or
				 * NL_PROCEED (dangerous) */
				err = -NLE_MSG_TRUNC;
				abort_parsing = TRUE;
			} else if (e->error) {
				int errsv = e->error > 0 ? e->error : -e->error;

				/* Error message reported back from kernel. */
				/* NOTE(review): nlmsg_seq is guint32; "%u" would match exactly —
				 * "%d" relies on the value fitting in an int. Confirm. */
				_LOGD ("netlink: recvmsg: error message from kernel: %s (%d) for request %d",
				       strerror (errsv),
				       errsv,
				       nlmsg_hdr (msg)->nlmsg_seq);
				/* negative values of seq_result encode the kernel-reported errno. */
				seq_result = -errsv;
			} else
				seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK;
		} else
			process_valid_msg = TRUE;

		seq_number = nlmsg_hdr (msg)->nlmsg_seq;

		/* check whether the seq number is different from before, and
		 * whether the previous number (@nlh_seq_last_seen) is a pending
		 * refresh-all request. In that case, the pending request is thereby
		 * completed.
		 *
		 * We must do that before processing the message with event_valid_msg(),
		 * because we must track the completion of the pending request before that. */
		event_seq_check_refresh_all (platform, seq_number);

		if (process_valid_msg) {
			/* Valid message (not checking for MULTIPART bit to
			 * get along with broken kernels. NL_SKIP has no
			 * effect on this. */
			event_valid_msg (platform, msg, handle_events);
			seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK;
		}

		event_seq_check (platform, seq_number, seq_result);

		if (abort_parsing)
			goto stop;

		err = 0;
		hdr = nlmsg_next (hdr, &n);
	}

	if (multipart) {
		/* Multipart message not yet complete, continue reading */
		goto continue_reading;
	}
stop:
	if (!handle_events) {
		/* when we don't handle events, we want to drain all messages from the socket
		 * without handling the messages (but still check for sequence numbers).
		 * Repeat reading. */
		goto continue_reading;
	}
out:
	if (interrupted)
		err = -NLE_DUMP_INTR;
	return err;
}
/*****************************************************************************/
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static gboolean
2015-12-15 10:40:41 +01:00
event_handler_read_netlink ( NMPlatform * platform , gboolean wait_for_acks )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
2016-02-19 01:06:28 +01:00
nm_auto_pop_netns NMPNetns * netns = NULL ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-12-15 10:40:41 +01:00
int r , nle ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
struct pollfd pfd ;
gboolean any = FALSE ;
2015-12-14 14:47:41 +01:00
gint64 now_ns ;
int timeout_ms ;
guint i ;
struct {
guint32 seq_number ;
gint64 timeout_abs_ns ;
} data_next ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2016-02-19 01:06:28 +01:00
if ( ! nm_platform_netns_push ( platform , & netns ) )
return FALSE ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
while ( TRUE ) {
2015-12-15 10:40:41 +01:00
while ( TRUE ) {
nle = event_handler_recvmsgs ( platform , TRUE ) ;
if ( nle < 0 )
switch ( nle ) {
case - NLE_AGAIN :
goto after_read ;
case - NLE_DUMP_INTR :
_LOGD ( " netlink: read: uncritical failure to retrieve incoming events: %s (%d) " , nl_geterror ( nle ) , nle ) ;
break ;
2016-11-30 16:40:54 +01:00
case - _NLE_MSG_TRUNC :
2016-01-25 15:08:32 +01:00
case - _NLE_NM_NOBUFS :
2016-11-30 16:40:54 +01:00
_LOGI ( " netlink: read: %s. Need to resynchronize platform cache " ,
( {
const char * _reason = " unknown " ;
switch ( nle ) {
case - _NLE_MSG_TRUNC : _reason = " message truncated " ; break ;
case - _NLE_NM_NOBUFS : _reason = " too many netlink events " ; break ;
}
_reason ;
} ) ) ;
2015-12-15 10:40:41 +01:00
event_handler_recvmsgs ( platform , FALSE ) ;
2016-01-24 18:46:14 +01:00
delayed_action_wait_for_nl_response_complete_all ( platform , WAIT_FOR_NL_RESPONSE_RESULT_FAILED_RESYNC ) ;
2015-12-15 10:40:41 +01:00
delayed_action_schedule ( platform ,
DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES ,
NULL ) ;
break ;
default :
_LOGE ( " netlink: read: failed to retrieve incoming events: %s (%d) " , nl_geterror ( nle ) , nle ) ;
break ;
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
any = TRUE ;
2015-12-15 10:40:41 +01:00
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-14 14:47:41 +01:00
after_read :
if ( ! NM_FLAGS_HAS ( priv - > delayed_action . flags , DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ) )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
return any ;
2015-12-14 14:47:41 +01:00
now_ns = 0 ;
data_next . seq_number = 0 ;
data_next . timeout_abs_ns = 0 ;
for ( i = 0 ; i < priv - > delayed_action . list_wait_for_nl_response - > len ; ) {
DelayedActionWaitForNlResponseData * data = & g_array_index ( priv - > delayed_action . list_wait_for_nl_response , DelayedActionWaitForNlResponseData , i ) ;
if ( data - > seq_result )
delayed_action_wait_for_nl_response_complete ( platform , i , data - > seq_result ) ;
else if ( ( now_ns ? : ( now_ns = nm_utils_get_monotonic_timestamp_ns ( ) ) ) > data - > timeout_abs_ns )
delayed_action_wait_for_nl_response_complete ( platform , i , WAIT_FOR_NL_RESPONSE_RESULT_FAILED_TIMEOUT ) ;
else {
i + + ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
2015-12-14 14:47:41 +01:00
if ( data_next . seq_number = = 0
2016-01-21 16:53:56 +01:00
| | data_next . timeout_abs_ns > data - > timeout_abs_ns ) {
2015-12-14 14:47:41 +01:00
data_next . seq_number = data - > seq_number ;
data_next . timeout_abs_ns = data - > timeout_abs_ns ;
2016-01-21 16:53:56 +01:00
}
2015-12-14 14:47:41 +01:00
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
}
2015-12-14 14:47:41 +01:00
if ( ! wait_for_acks
| | ! NM_FLAGS_HAS ( priv - > delayed_action . flags , DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE ) )
return any ;
nm_assert ( data_next . seq_number ) ;
nm_assert ( data_next . timeout_abs_ns > 0 ) ;
nm_assert ( now_ns > 0 ) ;
2015-12-15 10:40:41 +01:00
_LOGT ( " netlink: read: wait for ACK for sequence number %u... " , data_next . seq_number ) ;
2015-12-14 14:47:41 +01:00
timeout_ms = ( data_next . timeout_abs_ns - now_ns ) / ( NM_UTILS_NS_PER_SECOND / 1000 ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
memset ( & pfd , 0 , sizeof ( pfd ) ) ;
2015-12-15 10:51:26 +01:00
pfd . fd = nl_socket_get_fd ( priv - > nlh ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
pfd . events = POLLIN ;
2015-12-14 14:47:41 +01:00
r = poll ( & pfd , 1 , MAX ( 1 , timeout_ms ) ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( r = = 0 ) {
2015-12-14 14:47:41 +01:00
/* timeout and there is nothing to read. */
goto after_read ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
}
if ( r < 0 ) {
int errsv = errno ;
if ( errsv ! = EINTR ) {
2015-12-15 10:40:41 +01:00
_LOGE ( " netlink: read: poll failed with %s " , strerror ( errsv ) ) ;
2015-12-14 14:47:41 +01:00
delayed_action_wait_for_nl_response_complete_all ( platform , WAIT_FOR_NL_RESPONSE_RESULT_FAILED_POLL ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
return any ;
}
/* Continue to read again, even if there might be nothing to read after EINTR. */
}
}
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2013-03-27 22:23:24 +01:00
2015-04-06 18:29:36 +02:00
static void
cache_update_link_udev (NMPlatform *platform, int ifindex, GUdevDevice *udev_device)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	nm_auto_nmpobj NMPObject *obj_cache = NULL;
	gboolean was_visible;
	NMPCacheOpsType cache_op;

	/* Attach (or detach, when @udev_device is NULL) the udev device to the
	 * cached link with @ifindex. The cache reports back whether anything
	 * actually changed. */
	cache_op = nmp_cache_update_link_udev (priv->cache,
	                                       ifindex,
	                                       udev_device,
	                                       &obj_cache,
	                                       &was_visible,
	                                       cache_pre_hook,
	                                       platform);

	if (cache_op != NMP_CACHE_OPS_UNCHANGED) {
		nm_auto_pop_netns NMPNetns *netns = NULL;

		/* Signal emission may trigger callbacks that touch the kernel;
		 * make sure we are in the platform's network namespace first. */
		if (!nm_platform_netns_push (platform, &netns))
			return;
		do_emit_signal (platform, obj_cache, cache_op, was_visible);
	}
}
2013-05-29 12:00:50 -03:00
/* Handler for udev "add"/"move" events: validate the device and merge it
 * into the platform link cache.
 *
 * The device is ignored (with a log message) unless it has a usable
 * interface name, a positive IFINDEX property, and a sysfs path. */
static void
udev_device_added (NMPlatform *platform,
                   GUdevDevice *udev_device)
{
	const char *ifname;
	int ifindex;

	ifname = g_udev_device_get_name (udev_device);
	if (!ifname) {
		_LOGD ("udev-add: failed to get device's interface");
		return;
	}

	if (!g_udev_device_get_property (udev_device, "IFINDEX")) {
		/* fix: add the missing ": " separator so the message matches the
		 * "udev-add[%s]: ..." format used by the other log lines below. */
		_LOGW ("udev-add[%s]: failed to get device's ifindex", ifname);
		return;
	}
	ifindex = g_udev_device_get_property_as_int (udev_device, "IFINDEX");
	if (ifindex <= 0) {
		_LOGW ("udev-add[%s]: retrieved invalid IFINDEX=%d", ifname, ifindex);
		return;
	}

	if (!g_udev_device_get_sysfs_path (udev_device)) {
		_LOGD ("udev-add[%s,%d]: couldn't determine device path; ignoring...", ifname, ifindex);
		return;
	}

	_LOGT ("udev-add[%s,%d]: device added", ifname, ifindex);
	cache_update_link_udev (platform, ifindex, udev_device);
}
2014-04-17 14:57:55 +02:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better than O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which have many shortcomings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it loses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting to make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
/* Cache-lookup predicate: matches the link whose cached udev device is
 * exactly @udev_device (pointer comparison). */
static gboolean
_udev_device_removed_match_link (const NMPObject *obj, gpointer udev_device)
{
	return obj->_link.udev.device == udev_device;
}
/* Handler for udev "remove" events: detach the udev device from the
 * cached link.
 *
 * On removal the IFINDEX property may already be gone; in that case we
 * fall back to scanning the cache for the link that still references this
 * udev device. */
static void
udev_device_removed (NMPlatform *platform,
                     GUdevDevice *udev_device)
{
	int ifindex = 0;

	if (g_udev_device_get_property (udev_device, "IFINDEX"))
		ifindex = g_udev_device_get_property_as_int (udev_device, "IFINDEX");
	else {
		const NMPObject *obj;

		/* No IFINDEX on the event: reverse-lookup by the udev device
		 * pointer that was stored on the cached link at add time. */
		obj = nmp_cache_lookup_link_full (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache,
		                                  0, NULL, FALSE, NM_LINK_TYPE_NONE,
		                                  _udev_device_removed_match_link, udev_device);
		if (obj)
			ifindex = obj->link.ifindex;
	}

	_LOGD ("udev-remove: IFINDEX=%d", ifindex);

	if (ifindex <= 0)
		return;

	/* NULL detaches the udev device from the cached link. */
	cache_update_link_udev (platform, ifindex, NULL);
}
/* GUdevClient "uevent" callback: dispatch "add"/"move" and "remove"
 * actions for devices in the "net" subsystem. */
static void
handle_udev_event (GUdevClient *client,
                   const char *action,
                   GUdevDevice *udev_device,
                   gpointer user_data)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;
	NMPlatform *platform = NM_PLATFORM (user_data);
	const char *subsys;
	const char *ifindex;
	guint64 seqnum;

	g_return_if_fail (action != NULL);

	if (!nm_platform_netns_push (platform, &netns))
		return;

	/* A bit paranoid */
	subsys = g_udev_device_get_subsystem (udev_device);
	g_return_if_fail (!g_strcmp0 (subsys, "net"));

	ifindex = g_udev_device_get_property (udev_device, "IFINDEX");
	seqnum = g_udev_device_get_seqnum (udev_device);
	_LOGD ("UDEV event: action '%s' subsys '%s' device '%s' (%s); seqnum=%" G_GUINT64_FORMAT,
	       action, subsys, g_udev_device_get_name (udev_device),
	       ifindex ? ifindex : "unknown", seqnum);

	if (!strcmp (action, "add") || !strcmp (action, "move"))
		udev_device_added (platform, udev_device);
	if (!strcmp (action, "remove"))
		udev_device_removed (platform, udev_device);
}
2016-10-02 18:22:50 +02:00
/*****************************************************************************/
2013-05-29 12:00:50 -03:00
2013-03-27 22:23:24 +01:00
static void
2015-05-10 09:16:31 +02:00
nm_linux_platform_init ( NMLinuxPlatform * self )
2013-03-27 22:23:24 +01:00
{
2016-09-29 13:49:01 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( self ) ;
2016-03-04 20:57:07 +01:00
gboolean use_udev ;
2016-02-19 01:06:28 +01:00
use_udev = nmp_netns_is_initial ( )
& & access ( " /sys " , W_OK ) = = 0 ;
2015-05-10 09:16:31 +02:00
2015-12-14 11:53:46 +01:00
priv - > nlh_seq_next = 1 ;
2016-03-04 20:57:07 +01:00
priv - > cache = nmp_cache_new ( use_udev ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > delayed_action . list_master_connected = g_ptr_array_new ( ) ;
priv - > delayed_action . list_refresh_link = g_ptr_array_new ( ) ;
2015-12-14 14:47:41 +01:00
priv - > delayed_action . list_wait_for_nl_response = g_array_new ( FALSE , TRUE , sizeof ( DelayedActionWaitForNlResponseData ) ) ;
2015-05-12 07:14:55 +02:00
priv - > wifi_data = g_hash_table_new_full ( NULL , NULL , NULL , ( GDestroyNotify ) wifi_utils_deinit ) ;
2016-03-04 20:57:07 +01:00
if ( use_udev )
priv - > udev_client = g_udev_client_new ( ( const char * [ ] ) { " net " , NULL } ) ;
2013-03-27 22:23:24 +01:00
}
2015-04-18 12:53:45 +02:00
static void
constructed (GObject *_object)
{
	NMPlatform *platform = NM_PLATFORM (_object);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	int channel_flags;
	gboolean status;
	int nle;

	/* Either we ignore namespaces (no _netns set), or the platform instance
	 * must have been created while its netns was the current one. */
	nm_assert (!platform->_netns || platform->_netns == nmp_netns_get_current ());

	_LOGD ("create (%s netns, %s, %s udev)",
	       !platform->_netns ? "ignore" : "use",
	       !platform->_netns && nmp_netns_is_initial ()
	           ? "initial netns"
	           : (!nmp_netns_get_current ()
	                  ? "no netns support"
	                  : nm_sprintf_bufa (100, "in netns[%p]%s",
	                                     nmp_netns_get_current (),
	                                     nmp_netns_get_current () == nmp_netns_get_initial () ? "/main" : "")),
	       nmp_cache_use_udev_get (priv->cache) ? "use" : "no");

	/* Set up the rtnetlink event socket. */
	priv->nlh = nl_socket_alloc ();
	g_assert (priv->nlh);

	nle = nl_connect (priv->nlh, NETLINK_ROUTE);
	g_assert (!nle);
	nle = nl_socket_set_passcred (priv->nlh, 1);
	g_assert (!nle);

	/* No blocking for event socket, so that we can drain it safely. */
	nle = nl_socket_set_nonblocking (priv->nlh);
	g_assert (!nle);

	/* use 8 MB for receive socket kernel queue. */
	nle = nl_socket_set_buffer_size (priv->nlh, 8 * 1024 * 1024, 0);
	g_assert (!nle);

	/* explicitly set the msg buffer size and disable MSG_PEEK.
	 * If we later encounter NLE_MSG_TRUNC, we will adjust the buffer size. */
	nl_socket_disable_msg_peek (priv->nlh);
	nle = nl_socket_set_msg_buf_size (priv->nlh, 32 * 1024);
	g_assert (!nle);

	/* Subscribe to the multicast groups for link, address and route events
	 * of both address families. */
	nle = nl_socket_add_memberships (priv->nlh,
	                                 RTNLGRP_LINK,
	                                 RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR,
	                                 RTNLGRP_IPV4_ROUTE,  RTNLGRP_IPV6_ROUTE,
	                                 0);
	g_assert (!nle);
	_LOGD ("Netlink socket for events established: port=%u, fd=%d",
	       nl_socket_get_local_port (priv->nlh),
	       nl_socket_get_fd (priv->nlh));

	/* Wrap the netlink fd in a non-blocking GIOChannel and dispatch events
	 * from the main loop via event_handler(). */
	priv->event_channel = g_io_channel_unix_new (nl_socket_get_fd (priv->nlh));
	g_io_channel_set_encoding (priv->event_channel, NULL, NULL);
	g_io_channel_set_close_on_unref (priv->event_channel, TRUE);

	channel_flags = g_io_channel_get_flags (priv->event_channel);
	status = g_io_channel_set_flags (priv->event_channel,
	                                 channel_flags | G_IO_FLAG_NONBLOCK, NULL);
	g_assert (status);
	priv->event_id = g_io_add_watch (priv->event_channel,
	                                 (EVENT_CONDITIONS | ERROR_CONDITIONS | DISCONNECT_CONDITIONS),
	                                 event_handler, platform);

	/* complete construction of the GObject instance before populating the cache. */
	G_OBJECT_CLASS (nm_linux_platform_parent_class)->constructed (_object);

	/* Request a full dump of links, addresses and routes so the platform
	 * cache starts out in sync with the kernel. */
	_LOGD ("populate platform cache");
	delayed_action_schedule (platform,
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
	                         NULL);

	delayed_action_handle_all (platform, FALSE);

	/* Set up udev monitoring */
	if (priv->udev_client) {
		GUdevEnumerator *enumerator;
		GList *devices, *iter;

		g_signal_connect (priv->udev_client, "uevent", G_CALLBACK (handle_udev_event), platform);

		/* And read initial device list */
		enumerator = g_udev_enumerator_new (priv->udev_client);
		g_udev_enumerator_add_match_subsystem (enumerator, "net");

		g_udev_enumerator_add_match_is_initialized (enumerator);

		devices = g_udev_enumerator_execute (enumerator);
		for (iter = devices; iter; iter = g_list_next (iter)) {
			udev_device_added (platform, G_UDEV_DEVICE (iter->data));
			g_object_unref (G_UDEV_DEVICE (iter->data));
		}
		g_list_free (devices);
		g_object_unref (enumerator);
	}
}
2015-04-14 16:39:51 +02:00
/* dispose: tear down references that may be re-entered; safe to run twice.
 * Pending netlink responses are failed first so no waiter dangles. */
static void
dispose (GObject *object)
{
	NMPlatform *platform = NM_PLATFORM (object);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);

	_LOGD ("dispose");

	/* Fail all outstanding synchronous netlink requests before dropping state. */
	delayed_action_wait_for_nl_response_complete_all (platform, WAIT_FOR_NL_RESPONSE_RESULT_FAILED_DISPOSING);

	/* Drop any still-scheduled delayed actions; the arrays themselves are
	 * released in finalize(). */
	priv->delayed_action.flags = DELAYED_ACTION_TYPE_NONE;
	g_ptr_array_set_size (priv->delayed_action.list_master_connected, 0);
	g_ptr_array_set_size (priv->delayed_action.list_refresh_link, 0);

	g_clear_pointer (&priv->prune_candidates, g_hash_table_unref);

	if (priv->udev_client) {
		/* Disconnect the uevent handler before releasing the client so no
		 * callback can fire into a half-destroyed instance. */
		g_signal_handlers_disconnect_by_func (priv->udev_client, G_CALLBACK (handle_udev_event), platform);
		g_clear_object (&priv->udev_client);
	}

	G_OBJECT_CLASS (nm_linux_platform_parent_class)->dispose (object);
}
2013-03-27 22:23:24 +01:00
static void
2016-09-29 13:49:01 +02:00
finalize ( GObject * object )
2013-03-27 22:23:24 +01:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( object ) ;
2015-04-06 18:29:36 +02:00
nmp_cache_free ( priv - > cache ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
g_ptr_array_unref ( priv - > delayed_action . list_master_connected ) ;
g_ptr_array_unref ( priv - > delayed_action . list_refresh_link ) ;
2015-12-14 14:47:41 +01:00
g_array_unref ( priv - > delayed_action . list_wait_for_nl_response ) ;
2015-04-06 18:29:36 +02:00
2013-03-27 22:23:24 +01:00
g_source_remove ( priv - > event_id ) ;
g_io_channel_unref ( priv - > event_channel ) ;
2015-12-15 10:51:26 +01:00
nl_socket_free ( priv - > nlh ) ;
2013-03-27 22:23:24 +01:00
2014-02-04 14:27:03 +01:00
g_hash_table_unref ( priv - > wifi_data ) ;
2013-05-29 12:00:50 -03:00
2015-10-06 19:48:35 +02:00
if ( priv - > sysctl_get_prev_values ) {
sysctl_clear_cache_list = g_slist_remove ( sysctl_clear_cache_list , object ) ;
2015-08-30 15:51:20 +02:00
g_hash_table_destroy ( priv - > sysctl_get_prev_values ) ;
2015-10-06 19:48:35 +02:00
}
2015-08-30 15:51:20 +02:00
2013-03-27 22:23:24 +01:00
G_OBJECT_CLASS ( nm_linux_platform_parent_class ) - > finalize ( object ) ;
}
/* Class initializer: wires the GObject lifecycle hooks and fills the
 * NMPlatform vtable with the Linux-specific implementations defined
 * earlier in this file. */
static void
nm_linux_platform_class_init (NMLinuxPlatformClass *klass)
{
	GObjectClass *object_class = G_OBJECT_CLASS (klass);
	NMPlatformClass *platform_class = NM_PLATFORM_CLASS (klass);

	object_class->constructed = constructed;
	object_class->dispose = dispose;
	object_class->finalize = finalize;

	platform_class->sysctl_set = sysctl_set;
	platform_class->sysctl_get = sysctl_get;

	platform_class->link_get = _nm_platform_link_get;
	platform_class->link_get_by_ifname = _nm_platform_link_get_by_ifname;
	platform_class->link_get_by_address = _nm_platform_link_get_by_address;
	platform_class->link_get_all = link_get_all;
	platform_class->link_add = link_add;
	platform_class->link_delete = link_delete;
	platform_class->link_get_type_name = link_get_type_name;
	platform_class->link_get_unmanaged = link_get_unmanaged;

	platform_class->link_get_lnk = link_get_lnk;

	platform_class->link_refresh = link_refresh;

	platform_class->link_set_netns = link_set_netns;

	platform_class->link_set_up = link_set_up;
	platform_class->link_set_down = link_set_down;
	platform_class->link_set_arp = link_set_arp;
	platform_class->link_set_noarp = link_set_noarp;

	platform_class->link_get_udi = link_get_udi;
	platform_class->link_get_udev_device = link_get_udev_device;

	platform_class->link_set_user_ipv6ll_enabled = link_set_user_ipv6ll_enabled;
	platform_class->link_set_token = link_set_token;

	platform_class->link_set_address = link_set_address;
	platform_class->link_get_permanent_address = link_get_permanent_address;
	platform_class->link_set_mtu = link_set_mtu;

	platform_class->link_get_physical_port_id = link_get_physical_port_id;
	platform_class->link_get_dev_id = link_get_dev_id;
	platform_class->link_get_wake_on_lan = link_get_wake_on_lan;
	platform_class->link_get_driver_info = link_get_driver_info;

	platform_class->link_supports_carrier_detect = link_supports_carrier_detect;
	platform_class->link_supports_vlans = link_supports_vlans;

	platform_class->link_enslave = link_enslave;
	platform_class->link_release = link_release;

	platform_class->link_can_assume = link_can_assume;

	platform_class->vlan_add = vlan_add;
	platform_class->link_vlan_change = link_vlan_change;
	platform_class->link_vxlan_add = link_vxlan_add;

	platform_class->tun_add = tun_add;

	platform_class->infiniband_partition_add = infiniband_partition_add;
	platform_class->infiniband_partition_delete = infiniband_partition_delete;

	platform_class->wifi_get_capabilities = wifi_get_capabilities;
	platform_class->wifi_get_bssid = wifi_get_bssid;
	platform_class->wifi_get_frequency = wifi_get_frequency;
	platform_class->wifi_get_quality = wifi_get_quality;
	platform_class->wifi_get_rate = wifi_get_rate;
	platform_class->wifi_get_mode = wifi_get_mode;
	platform_class->wifi_set_mode = wifi_set_mode;
	platform_class->wifi_set_powersave = wifi_set_powersave;
	platform_class->wifi_find_frequency = wifi_find_frequency;
	platform_class->wifi_indicate_addressing_running = wifi_indicate_addressing_running;

	platform_class->mesh_get_channel = mesh_get_channel;
	platform_class->mesh_set_channel = mesh_set_channel;
	platform_class->mesh_set_ssid = mesh_set_ssid;

	platform_class->link_gre_add = link_gre_add;
	platform_class->link_ip6tnl_add = link_ip6tnl_add;
	platform_class->link_macsec_add = link_macsec_add;
	platform_class->link_macvlan_add = link_macvlan_add;
	platform_class->link_ipip_add = link_ipip_add;
	platform_class->link_sit_add = link_sit_add;

	platform_class->ip4_address_get = ip4_address_get;
	platform_class->ip6_address_get = ip6_address_get;
	platform_class->ip4_address_get_all = ip4_address_get_all;
	platform_class->ip6_address_get_all = ip6_address_get_all;
	platform_class->ip4_address_add = ip4_address_add;
	platform_class->ip6_address_add = ip6_address_add;
	platform_class->ip4_address_delete = ip4_address_delete;
	platform_class->ip6_address_delete = ip6_address_delete;

	platform_class->ip4_route_get = ip4_route_get;
	platform_class->ip6_route_get = ip6_route_get;
	platform_class->ip4_route_get_all = ip4_route_get_all;
	platform_class->ip6_route_get_all = ip6_route_get_all;
	platform_class->ip4_route_add = ip4_route_add;
	platform_class->ip6_route_add = ip6_route_add;
	platform_class->ip4_route_delete = ip4_route_delete;
	platform_class->ip6_route_delete = ip6_route_delete;

	platform_class->check_support_kernel_extended_ifa_flags = check_support_kernel_extended_ifa_flags;
	platform_class->check_support_user_ipv6ll = check_support_user_ipv6ll;

	platform_class->process_events = process_events;
}
2015-04-06 18:29:36 +02:00