/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */
/* nm-linux-platform.c - Linux kernel & udev network configuration layer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Copyright (C) 2012 - 2015 Red Hat, Inc.
 */
2014-11-13 10:07:02 -05:00
# include "config.h"
2013-03-27 22:23:24 +01:00
# include <errno.h>
# include <unistd.h>
# include <sys/socket.h>
2013-04-03 16:10:38 +02:00
# include <fcntl.h>
2014-02-23 14:22:32 +01:00
# include <dlfcn.h>
2013-03-27 22:23:24 +01:00
# include <netinet/icmp6.h>
# include <netinet/in.h>
2013-05-21 12:49:24 -03:00
# include <linux/ip.h>
2013-03-27 22:23:24 +01:00
# include <linux/if_arp.h>
2013-05-06 09:16:17 -04:00
# include <linux/if_link.h>
2013-04-25 15:46:39 -04:00
# include <linux/if_tun.h>
2013-05-21 12:49:24 -03:00
# include <linux/if_tunnel.h>
2013-03-27 22:23:24 +01:00
# include <netlink/netlink.h>
# include <netlink/object.h>
# include <netlink/cache.h>
# include <netlink/route/link.h>
# include <netlink/route/link/vlan.h>
2013-03-27 22:23:24 +01:00
# include <netlink/route/addr.h>
2013-03-27 22:23:24 +01:00
# include <netlink/route/route.h>
2013-05-29 12:00:50 -03:00
# include <gudev/gudev.h>
2013-03-27 22:23:24 +01:00
2014-09-29 17:58:44 +02:00
# if HAVE_LIBNL_INET6_ADDR_GEN_MODE || HAVE_LIBNL_INET6_TOKEN
2014-07-24 15:57:08 -05:00
# include <netlink/route/link/inet6.h>
2014-09-29 17:58:44 +02:00
# if HAVE_LIBNL_INET6_ADDR_GEN_MODE && HAVE_KERNEL_INET6_ADDR_GEN_MODE
2014-07-24 15:57:08 -05:00
# include <linux/if_link.h>
# else
# define IN6_ADDR_GEN_MODE_EUI64 0
# define IN6_ADDR_GEN_MODE_NONE 1
# endif
# endif
2015-02-22 11:54:03 +01:00
# include "nm-core-internal.h"
2013-12-10 19:04:22 +01:00
# include "NetworkManagerUtils.h"
2013-03-27 22:23:24 +01:00
# include "nm-linux-platform.h"
2015-05-02 07:59:59 +02:00
# include "nm-platform-utils.h"
2014-01-21 11:04:26 +01:00
# include "NetworkManagerUtils.h"
# include "nm-utils.h"
2015-07-17 14:38:54 +02:00
# include "nm-default.h"
2013-10-16 12:29:13 -05:00
# include "wifi/wifi-utils.h"
2014-02-04 14:27:03 +01:00
# include "wifi/wifi-utils-wext.h"
2015-04-14 23:15:27 +02:00
# include "nmp-object.h"
2013-03-27 22:23:24 +01:00
/* This is only included for the translation of VLAN flags */
# include "nm-setting-vlan.h"
2015-04-18 14:21:54 +02:00
/*********************************************************************************************/
2015-08-20 00:07:14 +02:00
# define _NMLOG_DOMAIN LOGD_PLATFORM
# define _NMLOG_PREFIX_NAME "platform-linux"
# define _NMLOG(level, ...) _LOG(level, _NMLOG_DOMAIN, platform, __VA_ARGS__)
2015-04-18 14:21:54 +02:00
/* Core logging macro: checks whether (level, domain) logging is enabled and,
 * if so, emits the message with a "platform-linux" prefix.  When @self is a
 * platform instance other than the singleton, the instance pointer is
 * appended to the prefix to disambiguate multiple platform objects. */
#define _LOG(level, domain, self, ...) \
    G_STMT_START { \
        const NMLogLevel __level = (level); \
        const NMLogDomain __domain = (domain); \
        \
        if (nm_logging_enabled (__level, __domain)) { \
            char __prefix[32]; \
            const char *__p_prefix = _NMLOG_PREFIX_NAME; \
            const void *const __self = (self); \
            \
            if (__self && __self != nm_platform_try_get ()) { \
                g_snprintf (__prefix, sizeof (__prefix), "%s[%p]", _NMLOG_PREFIX_NAME, __self); \
                __p_prefix = __prefix; \
            } \
            _nm_log (__level, __domain, 0, \
                     "%s: " _NM_UTILS_MACRO_FIRST (__VA_ARGS__), \
                     __p_prefix _NM_UTILS_MACRO_REST (__VA_ARGS__)); \
        } \
    } G_STMT_END
2015-08-17 17:48:37 +02:00
# define trace(...) _LOG (LOGL_TRACE, _NMLOG_DOMAIN, NULL, __VA_ARGS__)
2015-08-20 00:07:14 +02:00
# define debug(...) _LOG (LOGL_DEBUG, _NMLOG_DOMAIN, NULL, __VA_ARGS__)
2015-08-20 17:50:36 +02:00
# define info(...) _LOG (LOGL_INFO, _NMLOG_DOMAIN, NULL, __VA_ARGS__)
2015-08-20 00:07:14 +02:00
# define warning(...) _LOG (LOGL_WARN , _NMLOG_DOMAIN, NULL, __VA_ARGS__)
# define error(...) _LOG (LOGL_ERR , _NMLOG_DOMAIN, NULL, __VA_ARGS__)
2013-03-27 22:23:24 +01:00
2015-04-06 18:29:36 +02:00
/******************************************************************
* Forward declarations and enums
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Bit flags describing work that is scheduled on the platform instance and
 * executed later via delayed_action_handle_all().  The REFRESH_ALL_* flags
 * request a full re-dump of the respective object type from the kernel. */
typedef enum {
    DELAYED_ACTION_TYPE_NONE                        = 0,
    DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS           = (1LL << 0),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES   = (1LL << 1),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES   = (1LL << 2),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES      = (1LL << 3),
    DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES      = (1LL << 4),
    DELAYED_ACTION_TYPE_REFRESH_LINK                = (1LL << 5),
    DELAYED_ACTION_TYPE_MASTER_CONNECTED            = (1LL << 6),
    DELAYED_ACTION_TYPE_READ_NETLINK                = (1LL << 7),
    __DELAYED_ACTION_TYPE_MAX,

    /* Convenience mask covering all "refresh everything" actions. */
    DELAYED_ACTION_TYPE_REFRESH_ALL                 = DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
                                                      DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,

    DELAYED_ACTION_TYPE_MAX                         = __DELAYED_ACTION_TYPE_MAX - 1,
} DelayedActionType;
2015-04-23 23:16:00 +02:00
static gboolean tun_get_properties_ifname ( NMPlatform * platform , const char * ifname , NMPlatformTunProperties * props ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static void delayed_action_schedule ( NMPlatform * platform , DelayedActionType action_type , gpointer user_data ) ;
2015-06-19 15:38:41 +02:00
static gboolean delayed_action_handle_all ( NMPlatform * platform , gboolean read_netlink ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static void do_request_link ( NMPlatform * platform , int ifindex , const char * name , gboolean handle_delayed_action ) ;
static void do_request_all ( NMPlatform * platform , DelayedActionType action_type , gboolean handle_delayed_action ) ;
2015-04-06 18:29:36 +02:00
static void cache_pre_hook ( NMPCache * cache , const NMPObject * old , const NMPObject * new , NMPCacheOpsType ops_type , gpointer user_data ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static gboolean event_handler_read_netlink_all ( NMPlatform * platform , gboolean wait_for_acks ) ;
static NMPCacheOpsType cache_remove_netlink ( NMPlatform * platform , const NMPObject * obj_needle , NMPObject * * out_obj_cache , gboolean * out_was_visible , NMPlatformReason reason ) ;
2015-04-23 23:16:00 +02:00
2014-10-22 18:19:54 +02:00
/******************************************************************
* libnl unility functions and wrappers
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2014-02-23 14:22:32 +01:00
struct libnl_vtable
{
void * handle ;
2015-08-17 17:48:37 +02:00
void * handle_route ;
2014-02-23 14:22:32 +01:00
int ( * f_nl_has_capability ) ( int capability ) ;
2015-08-17 17:48:37 +02:00
int ( * f_rtnl_link_get_link_netnsid ) ( const struct rtnl_link * link , gint32 * out_link_netnsid ) ;
2014-02-23 14:22:32 +01:00
} ;
/* Fallback used when libnl's nl_has_capability() symbol cannot be resolved:
 * report every capability as unsupported. */
static int
_nl_f_nl_has_capability (int capability)
{
    return FALSE;
}
2015-08-17 17:31:44 +02:00
static const struct libnl_vtable *
2014-10-29 10:44:31 +01:00
_nl_get_vtable ( void )
2014-02-23 14:22:32 +01:00
{
static struct libnl_vtable vtable ;
if ( G_UNLIKELY ( ! vtable . f_nl_has_capability ) ) {
2015-08-25 22:24:10 +02:00
vtable . handle = dlopen ( " libnl-3.so.200 " , RTLD_LAZY | RTLD_NOLOAD ) ;
if ( vtable . handle ) {
vtable . f_nl_has_capability = dlsym ( vtable . handle , " nl_has_capability " ) ;
2014-02-23 14:22:32 +01:00
}
2015-08-17 17:48:37 +02:00
vtable . handle_route = dlopen ( " libnl-route-3.so.200 " , RTLD_LAZY | RTLD_NOLOAD ) ;
if ( vtable . handle_route ) {
vtable . f_rtnl_link_get_link_netnsid = dlsym ( vtable . handle_route , " rtnl_link_get_link_netnsid " ) ;
}
2014-02-23 14:22:32 +01:00
if ( ! vtable . f_nl_has_capability )
vtable . f_nl_has_capability = & _nl_f_nl_has_capability ;
2015-08-17 17:48:37 +02:00
trace ( " libnl: rtnl_link_get_link_netnsid() %s " , vtable . f_rtnl_link_get_link_netnsid ? " supported " : " not supported " ) ;
2015-08-27 18:58:41 +02:00
g_return_val_if_fail ( vtable . handle , & vtable ) ;
g_return_val_if_fail ( vtable . handle_route , & vtable ) ;
2014-02-23 14:22:32 +01:00
}
return & vtable ;
}
static gboolean
_nl_has_capability ( int capability )
{
return ( _nl_get_vtable ( ) - > f_nl_has_capability ) ( capability ) ;
}
2015-08-17 17:48:37 +02:00
/* Wrapper around rtnl_link_get_link_netnsid(), which may be missing from
 * older libnl-route builds.  Returns -NLE_OPNOTSUPP when unavailable. */
static int
_rtnl_link_get_link_netnsid (const struct rtnl_link *link, gint32 *out_link_netnsid)
{
    const struct libnl_vtable *vtable;

    g_return_val_if_fail (link, -NLE_INVAL);
    g_return_val_if_fail (out_link_netnsid, -NLE_INVAL);

    vtable = _nl_get_vtable ();
    return vtable->f_rtnl_link_get_link_netnsid
        ? vtable->f_rtnl_link_get_link_netnsid (link, out_link_netnsid)
        : -NLE_OPNOTSUPP;
}
gboolean
nm_platform_check_support_libnl_link_netnsid ( void )
{
return ! ! ( _nl_get_vtable ( ) - > f_rtnl_link_get_link_netnsid ) ;
}
2013-03-27 22:23:24 +01:00
/* Automatic deallocation of local variables */
2015-05-29 09:38:26 +02:00
# define auto_nl_object __attribute__((cleanup(_nl_auto_nl_object)))
2013-03-27 22:23:24 +01:00
static void
2015-05-29 09:38:26 +02:00
_nl_auto_nl_object ( void * ptr )
2013-03-27 22:23:24 +01:00
{
struct nl_object * * object = ptr ;
if ( object & & * object ) {
nl_object_put ( * object ) ;
* object = NULL ;
}
}
2015-05-29 09:38:26 +02:00
# define auto_nl_addr __attribute__((cleanup(_nl_auto_nl_addr)))
2013-03-27 22:23:24 +01:00
static void
2015-05-29 09:38:26 +02:00
_nl_auto_nl_addr ( void * ptr )
2013-03-27 22:23:24 +01:00
{
struct nl_addr * * object = ptr ;
if ( object & & * object ) {
nl_addr_put ( * object ) ;
* object = NULL ;
}
}
2014-04-29 16:00:48 +02:00
/* wrap the libnl alloc functions and abort on out-of-memory*/
static struct nl_addr *
2015-05-29 09:38:26 +02:00
_nl_addr_build ( int family , const void * buf , size_t size )
2014-04-29 16:00:48 +02:00
{
struct nl_addr * addr ;
addr = nl_addr_build ( family , ( void * ) buf , size ) ;
if ( ! addr )
g_error ( " nl_addr_build() failed with out of memory " ) ;
return addr ;
}
/* Allocate an rtnl_link, optionally presetting ifindex and/or name.
 * Aborts on out-of-memory (consistent with the other _nl_*_alloc wrappers). */
static struct rtnl_link *
_nl_rtnl_link_alloc (int ifindex, const char *name)
{
    struct rtnl_link *rtnllink;

    rtnllink = rtnl_link_alloc ();
    if (!rtnllink)
        g_error ("rtnl_link_alloc() failed with out of memory");

    if (ifindex > 0)
        rtnl_link_set_ifindex (rtnllink, ifindex);
    if (name)
        rtnl_link_set_name (rtnllink, name);
    return rtnllink;
}
/* Allocate an rtnl_addr, optionally presetting the ifindex.
 * Aborts on out-of-memory. */
static struct rtnl_addr *
_nl_rtnl_addr_alloc (int ifindex)
{
    struct rtnl_addr *rtnladdr;

    rtnladdr = rtnl_addr_alloc ();
    if (!rtnladdr)
        g_error ("rtnl_addr_alloc() failed with out of memory");
    if (ifindex > 0)
        rtnl_addr_set_ifindex (rtnladdr, ifindex);
    return rtnladdr;
}
/* Allocate an rtnl_route; aborts on out-of-memory. */
static struct rtnl_route *
_nl_rtnl_route_alloc (void)
{
    struct rtnl_route *rtnlroute = rtnl_route_alloc ();

    if (!rtnlroute)
        g_error ("rtnl_route_alloc() failed with out of memory");
    return rtnlroute;
}
/* Allocate an rtnl_nexthop; aborts on out-of-memory. */
static struct rtnl_nexthop *
_nl_rtnl_route_nh_alloc (void)
{
    struct rtnl_nexthop *nexthop;

    nexthop = rtnl_route_nh_alloc ();
    if (!nexthop)
        g_error ("rtnl_route_nh_alloc () failed with out of memory");
    return nexthop;
}
2013-03-27 22:23:24 +01:00
/* rtnl_addr_set_prefixlen() does not propagate the prefix length to the
 * embedded local nl_addr; update both here so they stay consistent. */
static void
_nl_rtnl_addr_set_prefixlen (struct rtnl_addr *rtnladdr, int plen)
{
    struct nl_addr *local;

    rtnl_addr_set_prefixlen (rtnladdr, plen);

    local = rtnl_addr_get_local (rtnladdr);
    if (!local)
        return;
    nl_addr_set_prefixlen (local, plen);
}
2015-05-29 11:12:15 +02:00
/* Render a netlink message type as a short symbolic name into @buf.
 * Unknown types are formatted numerically as "(%d)".  Returns @buf. */
static const char *
_nl_nlmsg_type_to_str (guint16 type, char *buf, gsize len)
{
    const char *str_type = NULL;

    switch (type) {
    case RTM_NEWLINK:  str_type = "NEWLINK";  break;
    case RTM_DELLINK:  str_type = "DELLINK";  break;
    case RTM_NEWADDR:  str_type = "NEWADDR";  break;
    case RTM_DELADDR:  str_type = "DELADDR";  break;
    case RTM_NEWROUTE: str_type = "NEWROUTE"; break;
    case RTM_DELROUTE: str_type = "DELROUTE"; break;
    }
    if (str_type)
        g_strlcpy (buf, str_type, len);
    else
        g_snprintf (buf, len, "(%d)", type);
    return buf;
}
2014-10-22 18:19:54 +02:00
/******************************************************************/
2015-05-29 09:38:26 +02:00
/* _nl_link_parse_info_data(): Re-fetches a link from the kernel
 * and parses its IFLA_INFO_DATA using a caller-provided parser.
 *
 * Code is stolen from rtnl_link_get_kernel(), nl_pickup(), and link_msg_parser().
 */

/* Callback invoked with the link's nested IFLA_INFO_DATA attribute. */
typedef int (*NMNLInfoDataParser) (struct nlattr *info_data, gpointer parser_data);

typedef struct {
    NMNLInfoDataParser parser;
    gpointer parser_data;
} NMNLInfoDataClosure;

/* Top-level attribute policy: only the nested IFLA_LINKINFO matters here. */
static struct nla_policy info_data_link_policy[IFLA_MAX + 1] = {
    [IFLA_LINKINFO] = { .type = NLA_NESTED },
};

/* Within IFLA_LINKINFO, only the nested IFLA_INFO_DATA payload is needed. */
static struct nla_policy info_data_link_info_policy[IFLA_INFO_MAX + 1] = {
    [IFLA_INFO_DATA] = { .type = NLA_NESTED },
};
static int
2015-05-29 09:38:26 +02:00
_nl_link_parse_info_data_cb ( struct nl_msg * msg , void * arg )
2015-05-29 09:40:24 +02:00
{
NMNLInfoDataClosure * closure = arg ;
struct nlmsghdr * n = nlmsg_hdr ( msg ) ;
struct nlattr * tb [ IFLA_MAX + 1 ] ;
struct nlattr * li [ IFLA_INFO_MAX + 1 ] ;
int err ;
if ( ! nlmsg_valid_hdr ( n , sizeof ( struct ifinfomsg ) ) )
return - NLE_MSG_TOOSHORT ;
err = nlmsg_parse ( n , sizeof ( struct ifinfomsg ) , tb , IFLA_MAX , info_data_link_policy ) ;
if ( err < 0 )
return err ;
if ( ! tb [ IFLA_LINKINFO ] )
return - NLE_MISSING_ATTR ;
err = nla_parse_nested ( li , IFLA_INFO_MAX , tb [ IFLA_LINKINFO ] , info_data_link_info_policy ) ;
if ( err < 0 )
return err ;
if ( ! li [ IFLA_INFO_DATA ] )
return - NLE_MISSING_ATTR ;
return closure - > parser ( li [ IFLA_INFO_DATA ] , closure - > parser_data ) ;
}
static int
2015-05-29 09:38:26 +02:00
_nl_link_parse_info_data ( struct nl_sock * sk , int ifindex ,
NMNLInfoDataParser parser , gpointer parser_data )
2015-05-29 09:40:24 +02:00
{
NMNLInfoDataClosure data = { . parser = parser , . parser_data = parser_data } ;
struct nl_msg * msg = NULL ;
struct nl_cb * cb ;
int err ;
err = rtnl_link_build_get_request ( ifindex , NULL , & msg ) ;
if ( err < 0 )
return err ;
err = nl_send_auto ( sk , msg ) ;
nlmsg_free ( msg ) ;
if ( err < 0 )
return err ;
cb = nl_cb_clone ( nl_socket_get_cb ( sk ) ) ;
if ( cb = = NULL )
return - NLE_NOMEM ;
2015-05-29 09:38:26 +02:00
nl_cb_set ( cb , NL_CB_VALID , NL_CB_CUSTOM , _nl_link_parse_info_data_cb , & data ) ;
2015-05-29 09:40:24 +02:00
err = nl_recvmsgs ( sk , cb ) ;
nl_cb_put ( cb ) ;
if ( err < 0 )
return err ;
nl_wait_for_ack ( sk ) ;
return 0 ;
}
/******************************************************************/
2015-05-07 10:16:15 +02:00
static int
_nl_sock_flush_data ( struct nl_sock * sk )
{
int nle ;
struct nl_cb * cb ;
cb = nl_cb_clone ( nl_socket_get_cb ( sk ) ) ;
if ( cb = = NULL )
return - NLE_NOMEM ;
nl_cb_set ( cb , NL_CB_VALID , NL_CB_DEFAULT , NULL , NULL ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
nl_cb_set ( cb , NL_CB_SEQ_CHECK , NL_CB_DEFAULT , NULL , NULL ) ;
nl_cb_err ( cb , NL_CB_DEFAULT , NULL , NULL ) ;
2015-05-07 10:16:15 +02:00
do {
errno = 0 ;
nle = nl_recvmsgs ( sk , cb ) ;
/* Work around a libnl bug fixed in 3.2.22 (375a6294) */
if ( nle = = 0 & & ( errno = = EAGAIN | | errno = = EWOULDBLOCK ) )
nle = - NLE_AGAIN ;
} while ( nle ! = - NLE_AGAIN ) ;
nl_cb_put ( cb ) ;
return nle ;
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
/* Assign a fresh, non-zero sequence number to @msg and optionally return it
 * via @out_seq. */
static void
_nl_msg_set_seq (struct nl_sock *sk, struct nl_msg *msg, guint32 *out_seq)
{
    guint32 seq;

    /* choose our own sequence number, because libnl does not ensure that
     * it isn't zero -- which would confuse our checking for outstanding
     * messages. */
    seq = nl_socket_use_seq (sk);
    if (seq == 0)
        seq = nl_socket_use_seq (sk);

    nlmsg_hdr (msg)->nlmsg_seq = seq;
    if (out_seq)
        *out_seq = seq;
}
/* Send a RTM_GETLINK request for a single link, identified by @ifindex and/or
 * @name (an empty name counts as unset).  The chosen sequence number is
 * returned via @out_seq so the reply can be matched.  Returns 0 or a negative
 * NLE error. */
static int
_nl_sock_request_link (NMPlatform *platform, struct nl_sock *sk, int ifindex, const char *name, guint32 *out_seq)
{
    struct nl_msg *msg = NULL;
    int err;

    if (name && !name[0])
        name = NULL;

    g_return_val_if_fail (ifindex > 0 || name, -NLE_INVAL);

    _LOGT ("sock: request-link %d%s%s%s", ifindex, name ? ", \"" : "", name ? name : "", name ? "\"" : "");

    if ((err = rtnl_link_build_get_request (ifindex, name, &msg)) < 0)
        return err;

    _nl_msg_set_seq (sk, msg, out_seq);

    err = nl_send_auto (sk, msg);
    nlmsg_free (msg);
    if (err < 0)
        return err;

    return 0;
}
static int
2015-06-19 16:24:18 +02:00
_nl_sock_request_all ( NMPlatform * platform , struct nl_sock * sk , NMPObjectType obj_type , guint32 * out_seq )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
const NMPClass * klass ;
struct rtgenmsg gmsg = { 0 } ;
struct nl_msg * msg ;
int err ;
klass = nmp_class_from_type ( obj_type ) ;
_LOGT ( " sock: request-all-%s " , klass - > obj_type_name ) ;
/* reimplement
* nl_rtgen_request ( sk , klass - > rtm_gettype , klass - > addr_family , NLM_F_DUMP ) ;
* because we need the sequence number .
*/
msg = nlmsg_alloc_simple ( klass - > rtm_gettype , NLM_F_DUMP ) ;
if ( ! msg )
return - NLE_NOMEM ;
gmsg . rtgen_family = klass - > addr_family ;
err = nlmsg_append ( msg , & gmsg , sizeof ( gmsg ) , NLMSG_ALIGNTO ) ;
if ( err < 0 )
goto errout ;
_nl_msg_set_seq ( sk , msg , out_seq ) ;
err = nl_send_auto ( sk , msg ) ;
errout :
nlmsg_free ( msg ) ;
return err > = 0 ? 0 : err ;
}
2015-05-07 10:16:15 +02:00
/******************************************************************/
2015-04-13 18:21:55 +02:00
# if HAVE_LIBNL_INET6_ADDR_GEN_MODE
static int _support_user_ipv6ll = 0 ;
2015-05-11 12:52:36 +02:00
# define _support_user_ipv6ll_still_undecided() (G_UNLIKELY (_support_user_ipv6ll == 0))
# else
# define _support_user_ipv6ll_still_undecided() (FALSE)
2015-04-13 18:21:55 +02:00
# endif
static gboolean
2015-06-10 10:01:49 +02:00
_support_user_ipv6ll_get ( void )
2015-04-13 18:21:55 +02:00
{
# if HAVE_LIBNL_INET6_ADDR_GEN_MODE
2015-05-11 12:52:36 +02:00
if ( _support_user_ipv6ll_still_undecided ( ) ) {
2015-04-13 18:21:55 +02:00
_support_user_ipv6ll = - 1 ;
nm_log_warn ( LOGD_PLATFORM , " kernel support for IFLA_INET6_ADDR_GEN_MODE %s " , " failed to detect; assume no support " ) ;
} else
return _support_user_ipv6ll > 0 ;
# endif
return FALSE ;
}
/* Probe a received rtnl_link for the IPv6 address-generation-mode attribute
 * and latch the result into _support_user_ipv6ll (1 = supported,
 * -1 = unsupported).  Only the first probe decides. */
static void
_support_user_ipv6ll_detect (const struct rtnl_link *rtnl_link)
{
#if HAVE_LIBNL_INET6_ADDR_GEN_MODE
    /* If we ever see a link with valid IPv6 link-local address
     * generation modes, the kernel supports it.
     */
    if (_support_user_ipv6ll_still_undecided ()) {
        uint8_t mode;

        if (rtnl_link_inet6_get_addr_gen_mode ((struct rtnl_link *) rtnl_link, &mode) == 0) {
            _support_user_ipv6ll = 1;
            nm_log_dbg (LOGD_PLATFORM, "kernel support for IFLA_INET6_ADDR_GEN_MODE %s", "detected");
        } else {
            _support_user_ipv6ll = -1;
            nm_log_dbg (LOGD_PLATFORM, "kernel support for IFLA_INET6_ADDR_GEN_MODE %s", "not detected");
        }
    }
#endif
}
2015-05-06 11:55:02 +02:00
/******************************************************************/
static int _support_kernel_extended_ifa_flags = 0 ;
# define _support_kernel_extended_ifa_flags_still_undecided() (G_UNLIKELY (_support_kernel_extended_ifa_flags == 0))
static void
_support_kernel_extended_ifa_flags_detect ( struct nl_msg * msg )
{
2015-06-25 17:49:09 +02:00
struct nlmsghdr * msg_hdr ;
2015-05-06 11:55:02 +02:00
if ( ! _support_kernel_extended_ifa_flags_still_undecided ( ) )
return ;
msg_hdr = nlmsg_hdr ( msg ) ;
if ( msg_hdr - > nlmsg_type ! = RTM_NEWADDR )
return ;
/* the extended address flags are only set for AF_INET6 */
if ( ( ( struct ifaddrmsg * ) nlmsg_data ( msg_hdr ) ) - > ifa_family ! = AF_INET6 )
return ;
/* see if the nl_msg contains the IFA_FLAGS attribute. If it does,
* we assume , that the kernel supports extended flags , IFA_F_MANAGETEMPADDR
* and IFA_F_NOPREFIXROUTE ( they were added together ) .
* */
_support_kernel_extended_ifa_flags =
nlmsg_find_attr ( msg_hdr , sizeof ( struct ifaddrmsg ) , 8 /* IFA_FLAGS */ )
? 1 : - 1 ;
}
static gboolean
2015-06-10 10:01:49 +02:00
_support_kernel_extended_ifa_flags_get ( void )
2015-05-06 11:55:02 +02:00
{
if ( _support_kernel_extended_ifa_flags_still_undecided ( ) ) {
nm_log_warn ( LOGD_PLATFORM , " Unable to detect kernel support for extended IFA_FLAGS. Assume no kernel support. " ) ;
_support_kernel_extended_ifa_flags = - 1 ;
}
return _support_kernel_extended_ifa_flags > 0 ;
}
2015-05-29 09:40:24 +02:00
/******************************************************************
* Object type specific utilities
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static guint
2015-05-29 09:55:51 +02:00
_nm_ip_config_source_to_rtprot ( NMIPConfigSource source )
2015-05-29 09:40:24 +02:00
{
switch ( source ) {
case NM_IP_CONFIG_SOURCE_UNKNOWN :
return RTPROT_UNSPEC ;
case NM_IP_CONFIG_SOURCE_KERNEL :
2015-06-23 17:00:24 +02:00
case NM_IP_CONFIG_SOURCE_RTPROT_KERNEL :
2015-05-29 09:40:24 +02:00
return RTPROT_KERNEL ;
case NM_IP_CONFIG_SOURCE_DHCP :
return RTPROT_DHCP ;
case NM_IP_CONFIG_SOURCE_RDISC :
return RTPROT_RA ;
default :
return RTPROT_STATIC ;
}
}
static NMIPConfigSource
2015-05-29 09:55:51 +02:00
_nm_ip_config_source_from_rtprot ( guint rtprot )
2015-05-29 09:40:24 +02:00
{
switch ( rtprot ) {
case RTPROT_UNSPEC :
return NM_IP_CONFIG_SOURCE_UNKNOWN ;
case RTPROT_KERNEL :
2015-06-23 17:00:24 +02:00
return NM_IP_CONFIG_SOURCE_RTPROT_KERNEL ;
2015-05-29 09:40:24 +02:00
case RTPROT_REDIRECT :
return NM_IP_CONFIG_SOURCE_KERNEL ;
case RTPROT_RA :
return NM_IP_CONFIG_SOURCE_RDISC ;
case RTPROT_DHCP :
return NM_IP_CONFIG_SOURCE_DHCP ;
default :
return NM_IP_CONFIG_SOURCE_USER ;
}
}
/******************************************************************/
/* Descriptor mapping an NMLinkType to the identifiers used to recognize it. */
typedef struct {
    const NMLinkType nm_type;
    const char *type_string;

    /* IFLA_INFO_KIND / rtnl_link_get_type() where applicable; the rtnl type
     * should only be specified if the device type can be created without
     * additional parameters, and if the device type can be determined from
     * the rtnl_type. eg, tun/tap should not be specified since both
     * tun and tap devices use "tun", and InfiniBand should not be
     * specified because a PKey is required at creation. Drivers set this
     * value from their 'struct rtnl_link_ops' structure.
     */
    const char *rtnl_type;

    /* uevent DEVTYPE where applicable, from /sys/class/net/<ifname>/uevent;
     * drivers set this value from their SET_NETDEV_DEV() call and the
     * 'struct device_type' name member.
     */
    const char *devtype;
} LinkDesc;
static const LinkDesc linktypes [ ] = {
{ NM_LINK_TYPE_NONE , " none " , NULL , NULL } ,
{ NM_LINK_TYPE_UNKNOWN , " unknown " , NULL , NULL } ,
{ NM_LINK_TYPE_ETHERNET , " ethernet " , NULL , NULL } ,
{ NM_LINK_TYPE_INFINIBAND , " infiniband " , NULL , NULL } ,
{ NM_LINK_TYPE_OLPC_MESH , " olpc-mesh " , NULL , NULL } ,
{ NM_LINK_TYPE_WIFI , " wifi " , NULL , " wlan " } ,
{ NM_LINK_TYPE_WWAN_ETHERNET , " wwan " , NULL , " wwan " } ,
{ NM_LINK_TYPE_WIMAX , " wimax " , " wimax " , " wimax " } ,
{ NM_LINK_TYPE_DUMMY , " dummy " , " dummy " , NULL } ,
{ NM_LINK_TYPE_GRE , " gre " , " gre " , NULL } ,
{ NM_LINK_TYPE_GRETAP , " gretap " , " gretap " , NULL } ,
{ NM_LINK_TYPE_IFB , " ifb " , " ifb " , NULL } ,
{ NM_LINK_TYPE_LOOPBACK , " loopback " , NULL , NULL } ,
{ NM_LINK_TYPE_MACVLAN , " macvlan " , " macvlan " , NULL } ,
{ NM_LINK_TYPE_MACVTAP , " macvtap " , " macvtap " , NULL } ,
{ NM_LINK_TYPE_OPENVSWITCH , " openvswitch " , " openvswitch " , NULL } ,
{ NM_LINK_TYPE_TAP , " tap " , NULL , NULL } ,
{ NM_LINK_TYPE_TUN , " tun " , NULL , NULL } ,
{ NM_LINK_TYPE_VETH , " veth " , " veth " , NULL } ,
{ NM_LINK_TYPE_VLAN , " vlan " , " vlan " , " vlan " } ,
{ NM_LINK_TYPE_VXLAN , " vxlan " , " vxlan " , " vxlan " } ,
{ NM_LINK_TYPE_BNEP , " bluetooth " , NULL , " bluetooth " } ,
{ NM_LINK_TYPE_BRIDGE , " bridge " , " bridge " , " bridge " } ,
{ NM_LINK_TYPE_BOND , " bond " , " bond " , " bond " } ,
{ NM_LINK_TYPE_TEAM , " team " , " team " , NULL } ,
} ;
/* Return the IFLA_INFO_KIND ("rtnl type") string for @type, or NULL when the
 * type cannot be created from the kind alone. Asserts (and returns NULL) for
 * an NMLinkType that is not in the linktypes table. */
static const char *
nm_link_type_to_rtnl_type_string (NMLinkType type)
{
	const LinkDesc *desc;

	for (desc = &linktypes[0]; desc < &linktypes[G_N_ELEMENTS (linktypes)]; desc++) {
		if (type == desc->nm_type)
			return desc->rtnl_type;
	}
	g_return_val_if_reached (NULL);
}
/* Return the human-readable name for @type. Asserts (and returns NULL) for
 * an NMLinkType that is not in the linktypes table. */
const char *
nm_link_type_to_string (NMLinkType type)
{
	const LinkDesc *desc;

	for (desc = &linktypes[0]; desc < &linktypes[G_N_ELEMENTS (linktypes)]; desc++) {
		if (type == desc->nm_type)
			return desc->type_string;
	}
	g_return_val_if_reached (NULL);
}
2014-10-22 18:19:54 +02:00
/******************************************************************
* NMPlatform types and functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2015-05-10 09:16:31 +02:00
typedef struct _NMLinuxPlatformPrivate NMLinuxPlatformPrivate;

struct _NMLinuxPlatformPrivate {
	/* netlink socket for synchronous requests (add/change/delete). */
	struct nl_sock *nlh;
	/* netlink socket for asynchronous events; also used for NLM_F_DUMP
	 * requests so that dumps are ordered consistently with events. */
	struct nl_sock *nlh_event;
	/* sequence number bookkeeping for requests sent on nlh_event. */
	guint32 nlh_seq_expect;
	guint32 nlh_seq_last;

	/* cache of platform objects (links, addresses, routes). */
	NMPCache *cache;

	/* GLib watch on the event socket's fd. */
	GIOChannel *event_channel;
	guint event_id;

	/* previously read sysctl values, used to log changes only. */
	GHashTable *sysctl_get_prev_values;

	GUdevClient *udev_client;

	/* actions that are scheduled and coalesced, to be handled later in
	 * one go (reduces redundant refetches of links/addresses/routes). */
	struct {
		DelayedActionType flags;
		GPtrArray *list_master_connected;
		GPtrArray *list_refresh_link;
		/* re-entrancy guard for delayed_action_handle_all(). */
		gint is_handling;
		guint idle_id;
	} delayed_action;

	GHashTable *prune_candidates;
	GHashTable *delayed_deletion;

	GHashTable *wifi_data;
};

/* Accessor for the instance private data; asserts @self is a
 * NMLinuxPlatform. */
static inline NMLinuxPlatformPrivate *
NM_LINUX_PLATFORM_GET_PRIVATE (const void *self)
{
	nm_assert (NM_IS_LINUX_PLATFORM (self));

	return ((NMLinuxPlatform *) self)->priv;
}
2014-10-22 18:19:54 +02:00
G_DEFINE_TYPE (NMLinuxPlatform, nm_linux_platform, NM_TYPE_PLATFORM)

/* Create the NMLinuxPlatform instance and register it as the NMPlatform
 * singleton (the singleton machinery keeps the reference). */
void
nm_linux_platform_setup (void)
{
	g_object_new (NM_TYPE_LINUX_PLATFORM,
	              NM_PLATFORM_REGISTER_SINGLETON, TRUE,
	              NULL);
}
/******************************************************************/
2015-06-19 16:24:18 +02:00
NMPObjectType
2015-04-06 18:36:57 +02:00
_nlo_get_object_type ( const struct nl_object * object )
2013-03-27 22:23:24 +01:00
{
2014-02-11 22:10:05 +01:00
const char * type_str ;
2013-07-27 00:42:10 +02:00
2014-02-11 22:10:05 +01:00
if ( ! object | | ! ( type_str = nl_object_get_type ( object ) ) )
2015-06-24 15:29:01 +02:00
return NMP_OBJECT_TYPE_UNKNOWN ;
2013-03-27 22:23:24 +01:00
2013-07-27 00:42:10 +02:00
if ( ! strcmp ( type_str , " route/link " ) )
2015-06-19 16:24:18 +02:00
return NMP_OBJECT_TYPE_LINK ;
2013-07-27 00:42:10 +02:00
else if ( ! strcmp ( type_str , " route/addr " ) ) {
2013-03-27 22:23:24 +01:00
switch ( rtnl_addr_get_family ( ( struct rtnl_addr * ) object ) ) {
case AF_INET :
2015-06-19 16:24:18 +02:00
return NMP_OBJECT_TYPE_IP4_ADDRESS ;
2013-03-27 22:23:24 +01:00
case AF_INET6 :
2015-06-19 16:24:18 +02:00
return NMP_OBJECT_TYPE_IP6_ADDRESS ;
2013-03-27 22:23:24 +01:00
default :
2015-06-24 15:29:01 +02:00
return NMP_OBJECT_TYPE_UNKNOWN ;
2013-03-27 22:23:24 +01:00
}
2013-07-27 00:42:10 +02:00
} else if ( ! strcmp ( type_str , " route/route " ) ) {
2013-03-27 22:23:24 +01:00
switch ( rtnl_route_get_family ( ( struct rtnl_route * ) object ) ) {
case AF_INET :
2015-06-19 16:24:18 +02:00
return NMP_OBJECT_TYPE_IP4_ROUTE ;
2013-03-27 22:23:24 +01:00
case AF_INET6 :
2015-06-19 16:24:18 +02:00
return NMP_OBJECT_TYPE_IP6_ROUTE ;
2013-03-27 22:23:24 +01:00
default :
2015-06-24 15:29:01 +02:00
return NMP_OBJECT_TYPE_UNKNOWN ;
2013-03-27 22:23:24 +01:00
}
2013-03-27 22:23:24 +01:00
} else
2015-06-24 15:29:01 +02:00
return NMP_OBJECT_TYPE_UNKNOWN ;
2013-03-27 22:23:24 +01:00
}
/******************************************************************/
2014-01-07 17:21:12 +01:00
/* NMPlatform vfunc: whether the kernel supports extended IFA_FLAGS.
 * Forces a decision (assuming "no" if undetected) via the _get() helper. */
static gboolean
check_support_kernel_extended_ifa_flags (NMPlatform *platform)
{
	g_return_val_if_fail (NM_IS_LINUX_PLATFORM (platform), FALSE);

	return _support_kernel_extended_ifa_flags_get ();
}
2014-07-24 15:57:08 -05:00
/* NMPlatform vfunc: whether the kernel supports userspace-managed IPv6
 * link-local addresses (IFLA_INET6_ADDR_GEN_MODE). */
static gboolean
check_support_user_ipv6ll (NMPlatform *platform)
{
	g_return_val_if_fail (NM_IS_LINUX_PLATFORM (platform), FALSE);

	return _support_user_ipv6ll_get ();
}
2015-06-19 15:38:41 +02:00
/* NMPlatform vfunc: drain pending netlink events and handle all scheduled
 * delayed actions (TRUE = also read from the event socket). */
static void
process_events (NMPlatform *platform)
{
	delayed_action_handle_all (platform, TRUE);
}
2015-06-19 16:58:28 +02:00
/******************************************************************/
# define cache_lookup_all_objects(type, platform, obj_type, visible_only) \
( ( const type * const * ) nmp_cache_lookup_multi ( NM_LINUX_PLATFORM_GET_PRIVATE ( ( platform ) ) - > cache , \
nmp_cache_id_init_object_type ( NMP_CACHE_ID_STATIC , ( obj_type ) , ( visible_only ) ) , \
NULL ) )
2015-05-29 09:40:24 +02:00
/******************************************************************/
2013-03-27 22:23:24 +01:00
2015-04-13 15:44:10 -05:00
#define DEVTYPE_PREFIX "DEVTYPE="

/* Read /sys/class/net/<ifname>/uevent under @sysfs_path and return a newly
 * allocated copy of the DEVTYPE= value, or NULL when the file cannot be read
 * or contains no DEVTYPE line. Caller frees the result. */
static char *
read_devtype (const char *sysfs_path)
{
	gs_free char *uevent = g_strdup_printf ("%s/uevent", sysfs_path);
	char *contents = NULL;
	char *line, *next;

	if (!g_file_get_contents (uevent, &contents, NULL, NULL))
		return NULL;

	/* walk the file line by line, terminating each line in place. */
	for (line = contents; line; line = next) {
		next = strpbrk (line, "\r\n");
		if (next)
			*next++ = '\0';
		if (strncmp (line, DEVTYPE_PREFIX, STRLEN (DEVTYPE_PREFIX)) == 0) {
			line += STRLEN (DEVTYPE_PREFIX);
			/* slide the value to the start of the buffer and hand the
			 * whole allocation to the caller. */
			memmove (contents, line, strlen (line) + 1);
			return contents;
		}
	}

	g_free (contents);
	return NULL;
}
2015-07-01 12:52:21 +02:00
/* Look up the cached link object for @ifindex, memoizing the result in
 * (*completed_from_cache, *link_cached) so repeated calls during the
 * processing of one netlink message do only one cache lookup.
 *
 * NOTE(review): the condition caches @obj only when it is NOT in netlink
 * (!is_in_netlink). Since this helper is used to complete the link "kind"
 * from the cache, and the kind originates from netlink, one would expect
 * the opposite test — confirm against upstream nm-linux-platform.c. */
static const NMPObject *
_lookup_link_cached (NMPlatform *platform, int ifindex, gboolean *completed_from_cache, const NMPObject **link_cached)
{
	const NMPObject *obj;

	nm_assert (completed_from_cache && link_cached);

	if (!*completed_from_cache) {
		obj = ifindex > 0 ? nmp_cache_lookup_link (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, ifindex) : NULL;
		if (obj && !obj->_link.netlink.is_in_netlink)
			*link_cached = obj;
		else
			*link_cached = NULL;
		*completed_from_cache = TRUE;
	}
	return *link_cached;
}
2013-05-29 12:00:50 -03:00
static NMLinkType
2015-07-01 12:52:21 +02:00
link_extract_type ( NMPlatform * platform , struct rtnl_link * rtnllink , gboolean * completed_from_cache , const NMPObject * * link_cached , const char * * out_kind )
2013-03-27 22:23:24 +01:00
{
2015-04-14 17:18:34 -05:00
const char * rtnl_type , * ifname ;
int i , arptype ;
2013-03-27 22:23:24 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( ! rtnllink ) {
if ( out_kind )
* out_kind = NULL ;
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_NONE ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
}
2013-03-27 22:23:24 +01:00
2015-04-14 17:18:34 -05:00
rtnl_type = rtnl_link_get_type ( rtnllink ) ;
2015-07-01 12:52:21 +02:00
if ( ! rtnl_type & & completed_from_cache ) {
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj ;
2015-07-01 12:52:21 +02:00
obj = _lookup_link_cached ( platform , rtnl_link_get_ifindex ( rtnllink ) , completed_from_cache , link_cached ) ;
if ( obj & & obj - > link . kind ) {
rtnl_type = obj - > link . kind ;
_LOGT ( " link_extract_type(): complete kind from cache: ifindex=%d, kind=%s " , rtnl_link_get_ifindex ( rtnllink ) , rtnl_type ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
}
}
if ( out_kind )
* out_kind = rtnl_type ;
2015-04-14 17:18:34 -05:00
if ( rtnl_type ) {
for ( i = 0 ; i < G_N_ELEMENTS ( linktypes ) ; i + + ) {
if ( g_strcmp0 ( rtnl_type , linktypes [ i ] . rtnl_type ) = = 0 )
2015-04-28 10:11:04 +02:00
return linktypes [ i ] . nm_type ;
2015-04-14 17:18:34 -05:00
}
2013-03-27 22:23:24 +01:00
2015-04-14 17:18:34 -05:00
if ( ! strcmp ( rtnl_type , " tun " ) ) {
NMPlatformTunProperties props ;
guint flags ;
2015-04-23 23:16:00 +02:00
if ( tun_get_properties_ifname ( platform , rtnl_link_get_name ( rtnllink ) , & props ) ) {
2015-04-14 17:18:34 -05:00
if ( ! g_strcmp0 ( props . mode , " tap " ) )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_TAP ;
2015-04-14 17:18:34 -05:00
if ( ! g_strcmp0 ( props . mode , " tun " ) )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_TUN ;
2015-04-14 17:18:34 -05:00
}
flags = rtnl_link_get_flags ( rtnllink ) ;
nm_log_dbg ( LOGD_PLATFORM , " Failed to read tun properties for interface %d (link flags: %X) " ,
rtnl_link_get_ifindex ( rtnllink ) , flags ) ;
platform: fix link_type_from_udev() to use ifname from libnl
When an interface gets renamed, we first receive a libnl update with
the changed interface name.
This results in the following chain of calls:
- event_notification()
- announce_object()
- link_init()
- link_extract_type()
- link_type_from_udev()
Then link_type_from_udev() looks up the name in the udev data (getting
the previous name, because we did not yet recieve the udev notification)
and passes the name to wifi_utils_is_wifi(), which eventually calls
nm_platform_link_get_ifindex() -- doing a lookup by the old name.
Fix this, by passing the ifname from libnl to link_type_from_udev().
Also, change hack_empty_master_iff_lower_up() because it is called
from event_notification(), at a moment when the link cache possibly
does not yet know the ifindex -- so that the call chain to
link_extract_type(), link_type_from_udev(), wifi_utils_is_wifi()
again might lead to lookup for something that does not yet exist.
Note, that in this case the name would not yet exist, because we
did not yet put the libnl object into the link cache.
Signed-off-by: Thomas Haller <thaller@redhat.com>
2014-03-06 21:11:47 +01:00
2015-04-14 17:18:34 -05:00
/* try guessing the type using the link flags instead... */
if ( flags & IFF_POINTOPOINT )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_TUN ;
return NM_LINK_TYPE_TAP ;
2015-04-14 17:18:34 -05:00
}
}
platform: fix link_type_from_udev() to use ifname from libnl
When an interface gets renamed, we first receive a libnl update with
the changed interface name.
This results in the following chain of calls:
- event_notification()
- announce_object()
- link_init()
- link_extract_type()
- link_type_from_udev()
Then link_type_from_udev() looks up the name in the udev data (getting
the previous name, because we did not yet recieve the udev notification)
and passes the name to wifi_utils_is_wifi(), which eventually calls
nm_platform_link_get_ifindex() -- doing a lookup by the old name.
Fix this, by passing the ifname from libnl to link_type_from_udev().
Also, change hack_empty_master_iff_lower_up() because it is called
from event_notification(), at a moment when the link cache possibly
does not yet know the ifindex -- so that the call chain to
link_extract_type(), link_type_from_udev(), wifi_utils_is_wifi()
again might lead to lookup for something that does not yet exist.
Note, that in this case the name would not yet exist, because we
did not yet put the libnl object into the link cache.
Signed-off-by: Thomas Haller <thaller@redhat.com>
2014-03-06 21:11:47 +01:00
2015-04-14 17:18:34 -05:00
arptype = rtnl_link_get_arptype ( rtnllink ) ;
if ( arptype = = ARPHRD_LOOPBACK )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_LOOPBACK ;
2015-04-14 17:18:34 -05:00
else if ( arptype = = ARPHRD_INFINIBAND )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_INFINIBAND ;
2015-04-14 17:18:34 -05:00
ifname = rtnl_link_get_name ( rtnllink ) ;
if ( ifname ) {
2014-10-03 13:41:49 -05:00
gs_free char * driver = NULL ;
2015-04-14 17:18:34 -05:00
gs_free char * sysfs_path = NULL ;
gs_free char * anycast_mask = NULL ;
gs_free char * devtype = NULL ;
2014-04-21 14:12:39 -05:00
platform: fix link_type_from_udev() to use ifname from libnl
When an interface gets renamed, we first receive a libnl update with
the changed interface name.
This results in the following chain of calls:
- event_notification()
- announce_object()
- link_init()
- link_extract_type()
- link_type_from_udev()
Then link_type_from_udev() looks up the name in the udev data (getting
the previous name, because we did not yet recieve the udev notification)
and passes the name to wifi_utils_is_wifi(), which eventually calls
nm_platform_link_get_ifindex() -- doing a lookup by the old name.
Fix this, by passing the ifname from libnl to link_type_from_udev().
Also, change hack_empty_master_iff_lower_up() because it is called
from event_notification(), at a moment when the link cache possibly
does not yet know the ifindex -- so that the call chain to
link_extract_type(), link_type_from_udev(), wifi_utils_is_wifi()
again might lead to lookup for something that does not yet exist.
Note, that in this case the name would not yet exist, because we
did not yet put the libnl object into the link cache.
Signed-off-by: Thomas Haller <thaller@redhat.com>
2014-03-06 21:11:47 +01:00
if ( arptype = = 256 ) {
2013-04-25 15:46:39 -04:00
/* Some s390 CTC-type devices report 256 for the encapsulation type
2015-04-20 14:14:36 +02:00
* for some reason , but we need to call them Ethernet .
2013-04-25 15:46:39 -04:00
*/
2015-04-20 14:14:36 +02:00
if ( ! g_strcmp0 ( driver , " ctcm " ) )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_ETHERNET ;
2013-09-06 19:39:11 -05:00
}
2015-04-14 17:18:34 -05:00
/* Fallback OVS detection for kernel <= 3.16 */
2015-05-02 07:59:59 +02:00
if ( nmp_utils_ethtool_get_driver_info ( ifname , & driver , NULL , NULL ) ) {
2014-10-03 13:41:49 -05:00
if ( ! g_strcmp0 ( driver , " openvswitch " ) )
return NM_LINK_TYPE_OPENVSWITCH ;
}
2013-09-06 19:39:11 -05:00
2015-04-13 15:44:10 -05:00
sysfs_path = g_strdup_printf ( " /sys/class/net/%s " , ifname ) ;
anycast_mask = g_strdup_printf ( " %s/anycast_mask " , sysfs_path ) ;
if ( g_file_test ( anycast_mask , G_FILE_TEST_EXISTS ) )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_OLPC_MESH ;
2015-04-13 15:44:10 -05:00
devtype = read_devtype ( sysfs_path ) ;
2015-04-14 17:18:34 -05:00
for ( i = 0 ; devtype & & i < G_N_ELEMENTS ( linktypes ) ; i + + ) {
2015-05-06 09:20:55 -05:00
if ( g_strcmp0 ( devtype , linktypes [ i ] . devtype ) = = 0 ) {
if ( linktypes [ i ] . nm_type = = NM_LINK_TYPE_BNEP ) {
/* Both BNEP and 6lowpan use DEVTYPE=bluetooth, so we must
* use arptype to distinguish between them .
*/
if ( arptype ! = ARPHRD_ETHER )
continue ;
}
2015-04-28 10:11:04 +02:00
return linktypes [ i ] . nm_type ;
2015-05-06 09:20:55 -05:00
}
2015-04-14 10:08:12 +02:00
}
2015-04-13 15:44:10 -05:00
2015-04-14 17:18:34 -05:00
/* Fallback for drivers that don't call SET_NETDEV_DEVTYPE() */
if ( wifi_utils_is_wifi ( ifname , sysfs_path ) )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_WIFI ;
2015-04-14 17:18:34 -05:00
/* Standard wired ethernet interfaces don't report an rtnl_link_type, so
* only allow fallback to Ethernet if no type is given . This should
* prevent future virtual network drivers from being treated as Ethernet
* when they should be Generic instead .
*/
if ( arptype = = ARPHRD_ETHER & & ! rtnl_type & & ! devtype )
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_ETHERNET ;
2015-04-14 17:18:34 -05:00
}
2015-04-13 15:44:10 -05:00
2015-04-28 10:11:04 +02:00
return NM_LINK_TYPE_UNKNOWN ;
2013-05-29 12:00:50 -03:00
}
2015-04-14 23:14:06 +02:00
/* Fill a NMPlatformLink (and its private NMPObjectLink wrapper) from a libnl
 * rtnl_link object.
 *
 * @id_only: when TRUE, only the ID field (ifindex) is initialized.
 * @complete_from_cache: when TRUE, fields that cannot be derived from @_nlo
 *   alone (link type, vlan-id) may be completed from the platform cache.
 *
 * Returns: TRUE (link objects are never rejected here). */
gboolean
_nmp_vt_cmd_plobj_init_from_nl_link (NMPlatform *platform, NMPlatformObject *_obj, const struct nl_object *_nlo, gboolean id_only, gboolean complete_from_cache)
{
	NMPlatformLink *obj = (NMPlatformLink *) _obj;
	NMPObjectLink *obj_priv = (NMPObjectLink *) _obj;
	struct rtnl_link *nlo = (struct rtnl_link *) _nlo;
	const char *name;
	struct nl_addr *nladdr;
	const char *kind;
	gboolean completed_from_cache_val = FALSE;
	gboolean *completed_from_cache = complete_from_cache ? &completed_from_cache_val : NULL;
	const NMPObject *link_cached = NULL;
	int parent;

	/* the caller must hand in a zeroed object. */
	nm_assert (memcmp (obj, ((char [sizeof (NMPObjectLink)]) { 0 }), sizeof (NMPObjectLink)) == 0);

	if (_LOGT_ENABLED () && !NM_IN_SET (rtnl_link_get_family (nlo), AF_UNSPEC, AF_BRIDGE))
		_LOGT ("netlink object for ifindex %d has unusual family %d", rtnl_link_get_ifindex (nlo), rtnl_link_get_family (nlo));

	obj->ifindex = rtnl_link_get_ifindex (nlo);

	if (id_only)
		return TRUE;

	name = rtnl_link_get_name (nlo);
	if (name)
		g_strlcpy (obj->name, name, sizeof (obj->name));

	obj->type = link_extract_type (platform, nlo, completed_from_cache, &link_cached, &kind);
	obj->kind = g_intern_string (kind);
	obj->flags = rtnl_link_get_flags (nlo);
	obj->connected = NM_FLAGS_HAS (obj->flags, IFF_LOWER_UP);
	obj->master = rtnl_link_get_master (nlo);

	parent = rtnl_link_get_link (nlo);
	if (parent > 0) {
		gint32 link_netnsid;

		/* a parent in another network namespace cannot be referenced by
		 * ifindex; use the special OTHER_NETNS marker instead. */
		if (_rtnl_link_get_link_netnsid (nlo, &link_netnsid) == 0)
			obj->parent = NM_PLATFORM_LINK_OTHER_NETNS;
		else
			obj->parent = parent;
	}

	obj->mtu = rtnl_link_get_mtu (nlo);
	obj->arptype = rtnl_link_get_arptype (nlo);

	if (obj->type == NM_LINK_TYPE_VLAN) {
		if (!g_strcmp0 (rtnl_link_get_type (nlo), "vlan"))
			obj->vlan_id = rtnl_link_vlan_get_id (nlo);
		else if (completed_from_cache) {
			/* the message carries no vlan data; fall back to the cached link. */
			_lookup_link_cached (platform, obj->ifindex, completed_from_cache, &link_cached);
			if (link_cached)
				obj->vlan_id = link_cached->link.vlan_id;
		}
	}

	nladdr = rtnl_link_get_addr (nlo);
	if (nladdr) {
		unsigned int addr_len;

		addr_len = nl_addr_get_len (nladdr);
		if (addr_len > 0 && addr_len <= NM_UTILS_HWADDR_LEN_MAX) {
			G_STATIC_ASSERT (NM_UTILS_HWADDR_LEN_MAX == sizeof (obj->addr.data));
			memcpy (obj->addr.data, nl_addr_get_binary_addr (nladdr), addr_len);
			obj->addr.len = addr_len;
		}
	}

#if HAVE_LIBNL_INET6_ADDR_GEN_MODE
	if (_support_user_ipv6ll_get ()) {
		guint8 mode = 0;

		if (rtnl_link_inet6_get_addr_gen_mode (nlo, &mode) == 0)
			obj->inet6_addr_gen_mode_inv = _nm_platform_uint8_inv (mode);
	}
#endif

#if HAVE_LIBNL_INET6_TOKEN
	if ((rtnl_link_inet6_get_token (nlo, &nladdr)) == 0) {
		if (   nl_addr_get_family (nladdr) == AF_INET6
		    && nl_addr_get_len (nladdr) == sizeof (struct in6_addr)) {
			struct in6_addr *addr;
			NMUtilsIPv6IfaceId *iid = &obj->inet6_token.iid;
			guint i;

			addr = nl_addr_get_binary_addr (nladdr);
			/* the IID is the low 64 bits of the token address. */
			for (i = 0; i < 8; i++)
				iid->id_u8[i] = addr->s6_addr[8 + i];
			obj->inet6_token.is_valid = TRUE;
		}
		nl_addr_put (nladdr);
	}
#endif

	obj_priv->netlink.is_in_netlink = TRUE;
	return TRUE;
}
2014-06-07 13:52:01 +02:00
/* _timestamp_nl_to_ms:
 * @timestamp_nl: a timestamp from ifa_cacheinfo.
 * @monotonic_ms: *now* in CLOCK_MONOTONIC milliseconds. Needed to estimate
 *   the current uptime and how often @timestamp_nl wrapped.
 *
 * Convert a timestamp from ifa_cacheinfo to CLOCK_MONOTONIC milliseconds.
 * The ifa_cacheinfo fields tstamp and cstamp count in 1/100th of a second
 * of clock_gettime(CLOCK_MONOTONIC). As the uint32 counter wraps every
 * 497 days of uptime, we have to compensate for the elapsed wrap-arounds. */
static gint64
_timestamp_nl_to_ms (guint32 timestamp_nl, gint64 monotonic_ms)
{
	const gint64 WRAP_INTERVAL = (((gint64) G_MAXUINT32) + 1) * (1000 / 100);
	gint64 ts_ms;

	/* scale from 1/100th of a second to milliseconds. */
	ts_ms = ((gint64) timestamp_nl) * (1000 / 100);

	if (ts_ms > monotonic_ms) {
		/* the timestamp lies in the future; truncate it to *now*. */
		ts_ms = monotonic_ms;
	} else if (monotonic_ms >= WRAP_INTERVAL) {
		/* add back the wrap intervals that already elapsed; if that
		 * overshoots *now*, the timestamp belongs to the previous cycle. */
		ts_ms += (monotonic_ms / WRAP_INTERVAL) * WRAP_INTERVAL;
		if (ts_ms > monotonic_ms)
			ts_ms -= WRAP_INTERVAL;
	}

	return ts_ms;
}
static guint32
2015-04-14 12:53:20 +02:00
_rtnl_addr_last_update_time_to_nm ( const struct rtnl_addr * rtnladdr , gint32 * out_now_nm )
2014-06-07 13:52:01 +02:00
{
guint32 last_update_time = rtnl_addr_get_last_update_time ( ( struct rtnl_addr * ) rtnladdr ) ;
struct timespec tp ;
gint64 now_nl , now_nm , result ;
/* timestamp is unset. Default to 1. */
2015-04-14 12:53:20 +02:00
if ( ! last_update_time ) {
if ( out_now_nm )
* out_now_nm = 0 ;
2014-06-07 13:52:01 +02:00
return 1 ;
2015-04-14 12:53:20 +02:00
}
2014-06-07 13:52:01 +02:00
/* do all the calculations in milliseconds scale */
clock_gettime ( CLOCK_MONOTONIC , & tp ) ;
now_nm = nm_utils_get_monotonic_timestamp_ms ( ) ;
now_nl = ( ( ( gint64 ) tp . tv_sec ) * ( ( gint64 ) 1000 ) ) +
( tp . tv_nsec / ( NM_UTILS_NS_PER_SECOND / 1000 ) ) ;
result = now_nm - ( now_nl - _timestamp_nl_to_ms ( last_update_time , now_nl ) ) ;
2015-04-14 12:53:20 +02:00
if ( out_now_nm )
* out_now_nm = now_nm / 1000 ;
2014-06-07 13:52:01 +02:00
/* converting the last_update_time into nm_utils_get_monotonic_timestamp_ms() scale is
* a good guess but fails in the following situations :
*
* - If the address existed before start of the process , the timestamp in nm scale would
* be negative or zero . In this case we default to 1.
* - during hibernation , the CLOCK_MONOTONIC / last_update_time drifts from
* nm_utils_get_monotonic_timestamp_ms ( ) scale .
*/
if ( result < = 1000 )
return 1 ;
if ( result > now_nm )
return now_nm / 1000 ;
return result / 1000 ;
}
2015-04-14 23:14:06 +02:00
/* Extend @lifetime by @seconds, saturating just below PERMANENT.
 * A PERMANENT lifetime, or a zero extension, is returned unchanged. */
static guint32
_extend_lifetime (guint32 lifetime, guint32 seconds)
{
	guint64 extended;

	if (   lifetime == NM_PLATFORM_LIFETIME_PERMANENT
	    || seconds == 0)
		return lifetime;

	/* compute in 64 bit to avoid overflow, then clamp. */
	extended = (guint64) lifetime + (guint64) seconds;
	return MIN (extended, NM_PLATFORM_LIFETIME_PERMANENT - 1);
}
/* The rtnl_addr object contains relative lifetimes @valid and @preferred
 * that count in seconds, starting from the moment when the kernel constructed
 * the netlink message.
 *
 * There is also a field rtnl_addr_last_update_time(), which is the absolute
 * time in 1/100th of a second of clock_gettime(CLOCK_MONOTONIC) when the
 * address was modified (wrapping every 497 days).
 * Immediately at the time when the address was last modified, #NOW and
 * @last_update_time are the same, so (only) in that case @valid and
 * @preferred are anchored at @last_update_time. However, this is not true in
 * general. As time goes by, whenever kernel sends a new address via netlink,
 * the lifetimes keep counting down. This function re-anchors the lifetimes
 * at the last-update timestamp.
 **/
static void
_nlo_rtnl_addr_get_lifetimes (const struct rtnl_addr *rtnladdr,
                              guint32 *out_timestamp,
                              guint32 *out_lifetime,
                              guint32 *out_preferred)
{
	guint32 timestamp = 0;
	gint32 now;
	guint32 lifetime = rtnl_addr_get_valid_lifetime ((struct rtnl_addr *) rtnladdr);
	guint32 preferred = rtnl_addr_get_preferred_lifetime ((struct rtnl_addr *) rtnladdr);

	if (   lifetime != NM_PLATFORM_LIFETIME_PERMANENT
	    || preferred != NM_PLATFORM_LIFETIME_PERMANENT) {
		/* preferred can never outlast valid. */
		if (preferred > lifetime)
			preferred = lifetime;

		timestamp = _rtnl_addr_last_update_time_to_nm (rtnladdr, &now);
		if (now == 0) {
			/* strange. failed to detect the last-update time and assumed that timestamp is 1. */
			nm_assert (timestamp == 1);
			now = nm_utils_get_monotonic_timestamp_s ();
		}

		if (timestamp < now) {
			guint32 diff = now - timestamp;

			/* the lifetimes counted down since @timestamp; stretch them so
			 * they are valid relative to @timestamp again. */
			lifetime = _extend_lifetime (lifetime, diff);
			preferred = _extend_lifetime (preferred, diff);
		} else
			nm_assert (timestamp == now);
	}

	*out_timestamp = timestamp;
	*out_lifetime = lifetime;
	*out_preferred = preferred;
}
/* Fill a NMPlatformIP4Address from a libnl rtnl_addr object.
 * With @id_only, only the ID fields (ifindex, address, plen) are set.
 * Returns FALSE only via g_return_val_if_reached() on malformed input. */
gboolean
_nmp_vt_cmd_plobj_init_from_nl_ip4_address (NMPlatform *platform, NMPlatformObject *_obj, const struct nl_object *_nlo, gboolean id_only, gboolean complete_from_cache)
{
	NMPlatformIP4Address *obj = (NMPlatformIP4Address *) _obj;
	struct rtnl_addr *nlo = (struct rtnl_addr *) _nlo;
	struct nl_addr *addr_local = rtnl_addr_get_local (nlo);
	struct nl_addr *addr_peer = rtnl_addr_get_peer (nlo);
	const char *label;

	if (!addr_local || nl_addr_get_len (addr_local) != sizeof (obj->address))
		g_return_val_if_reached (FALSE);

	/* ID fields first. */
	obj->ifindex = rtnl_addr_get_ifindex (nlo);
	obj->plen = rtnl_addr_get_prefixlen (nlo);
	memcpy (&obj->address, nl_addr_get_binary_addr (addr_local), sizeof (obj->address));

	if (id_only)
		return TRUE;

	obj->source = NM_IP_CONFIG_SOURCE_KERNEL;
	_nlo_rtnl_addr_get_lifetimes (nlo,
	                              &obj->timestamp,
	                              &obj->lifetime,
	                              &obj->preferred);

	if (addr_peer) {
		if (nl_addr_get_len (addr_peer) != sizeof (obj->peer_address))
			g_warn_if_reached ();
		else
			memcpy (&obj->peer_address, nl_addr_get_binary_addr (addr_peer), sizeof (obj->peer_address));
	}

	label = rtnl_addr_get_label (nlo);
	/* Check for ':'; we're only interested in labels used as interface aliases */
	if (label && strchr (label, ':'))
		g_strlcpy (obj->label, label, sizeof (obj->label));

	return TRUE;
}
/* Fill a NMPlatformIP6Address from a libnl rtnl_addr object.
 * With @id_only, only the ID fields (ifindex, address, plen) are set.
 * Returns FALSE only via g_return_val_if_reached() on malformed input. */
gboolean
_nmp_vt_cmd_plobj_init_from_nl_ip6_address (NMPlatform *platform, NMPlatformObject *_obj, const struct nl_object *_nlo, gboolean id_only, gboolean complete_from_cache)
{
	NMPlatformIP6Address *obj = (NMPlatformIP6Address *) _obj;
	struct rtnl_addr *nlo = (struct rtnl_addr *) _nlo;
	struct nl_addr *addr_local = rtnl_addr_get_local (nlo);
	struct nl_addr *addr_peer = rtnl_addr_get_peer (nlo);

	if (!addr_local || nl_addr_get_len (addr_local) != sizeof (obj->address))
		g_return_val_if_reached (FALSE);

	/* ID fields first. */
	obj->ifindex = rtnl_addr_get_ifindex (nlo);
	obj->plen = rtnl_addr_get_prefixlen (nlo);
	memcpy (&obj->address, nl_addr_get_binary_addr (addr_local), sizeof (obj->address));

	if (id_only)
		return TRUE;

	obj->source = NM_IP_CONFIG_SOURCE_KERNEL;
	_nlo_rtnl_addr_get_lifetimes (nlo,
	                              &obj->timestamp,
	                              &obj->lifetime,
	                              &obj->preferred);
	obj->flags = rtnl_addr_get_flags (nlo);

	if (addr_peer) {
		if (nl_addr_get_len (addr_peer) != sizeof (obj->peer_address))
			g_warn_if_reached ();
		else
			memcpy (&obj->peer_address, nl_addr_get_binary_addr (addr_peer), sizeof (obj->peer_address));
	}

	return TRUE;
}
/* Fill a NMPlatformIP4Route from a libnl rtnl_route object.
 * Only plain unicast, main-table, tos-0, single-nexthop routes are accepted;
 * anything else returns FALSE.
 *
 * NOTE(review): unlike the IPv6 counterpart, there is no early return on
 * @id_only here; all fields are always filled. Confirm this is intentional. */
gboolean
_nmp_vt_cmd_plobj_init_from_nl_ip4_route (NMPlatform *platform, NMPlatformObject *_obj, const struct nl_object *_nlo, gboolean id_only, gboolean complete_from_cache)
{
	NMPlatformIP4Route *obj = (NMPlatformIP4Route *) _obj;
	struct rtnl_route *nlo = (struct rtnl_route *) _nlo;
	struct nl_addr *dst_addr, *gw_addr;
	struct rtnl_nexthop *nexthop;
	struct nl_addr *pref_src;

	if (   rtnl_route_get_type (nlo) != RTN_UNICAST
	    || rtnl_route_get_table (nlo) != RT_TABLE_MAIN
	    || rtnl_route_get_tos (nlo) != 0
	    || rtnl_route_get_nnexthops (nlo) != 1)
		return FALSE;

	nexthop = rtnl_route_nexthop_n (nlo, 0);
	if (!nexthop)
		g_return_val_if_reached (FALSE);
	dst_addr = rtnl_route_get_dst (nlo);
	if (!dst_addr)
		g_return_val_if_reached (FALSE);

	if (nl_addr_get_len (dst_addr)) {
		if (nl_addr_get_len (dst_addr) != sizeof (obj->network))
			g_return_val_if_reached (FALSE);
		memcpy (&obj->network, nl_addr_get_binary_addr (dst_addr), sizeof (obj->network));
	}
	obj->ifindex = rtnl_route_nh_get_ifindex (nexthop);
	obj->plen = nl_addr_get_prefixlen (dst_addr);
	obj->metric = rtnl_route_get_priority (nlo);
	obj->scope_inv = nm_platform_route_scope_inv (rtnl_route_get_scope (nlo));

	gw_addr = rtnl_route_nh_get_gateway (nexthop);
	if (gw_addr) {
		if (nl_addr_get_len (gw_addr) != sizeof (obj->gateway))
			g_warn_if_reached ();
		else
			memcpy (&obj->gateway, nl_addr_get_binary_addr (gw_addr), sizeof (obj->gateway));
	}

	rtnl_route_get_metric (nlo, RTAX_ADVMSS, &obj->mss);

	if (rtnl_route_get_flags (nlo) & RTM_F_CLONED) {
		/* we must not straight way reject cloned routes, because we might have
		 * cached a non-cloned route. If we now receive an update of the route
		 * with the route being cloned, we must still return the object, so
		 * that we can remove the old one from the cache.
		 *
		 * This happens, because this route is not nmp_object_is_alive().
		 **/
		obj->source = _NM_IP_CONFIG_SOURCE_RTM_F_CLONED;
	} else
		obj->source = _nm_ip_config_source_from_rtprot (rtnl_route_get_protocol (nlo));

	pref_src = rtnl_route_get_pref_src (nlo);
	if (pref_src) {
		if (nl_addr_get_len (pref_src) != sizeof (obj->pref_src))
			g_warn_if_reached ();
		else
			memcpy (&obj->pref_src, nl_addr_get_binary_addr (pref_src), sizeof (obj->pref_src));
	}

	return TRUE;
}
/* Fill a NMPlatformIP6Route from a libnl rtnl_route object.
 * Only plain unicast, main-table, tos-0, single-nexthop routes are accepted;
 * anything else returns FALSE. With @id_only, only the ID fields
 * (ifindex, network, plen, metric) are set. */
gboolean
_nmp_vt_cmd_plobj_init_from_nl_ip6_route (NMPlatform *platform, NMPlatformObject *_obj, const struct nl_object *_nlo, gboolean id_only, gboolean complete_from_cache)
{
	NMPlatformIP6Route *obj = (NMPlatformIP6Route *) _obj;
	struct rtnl_route *nlo = (struct rtnl_route *) _nlo;
	struct nl_addr *dst_addr, *gw_addr;
	struct rtnl_nexthop *nexthop;

	if (   rtnl_route_get_type (nlo) != RTN_UNICAST
	    || rtnl_route_get_table (nlo) != RT_TABLE_MAIN
	    || rtnl_route_get_tos (nlo) != 0
	    || rtnl_route_get_nnexthops (nlo) != 1)
		return FALSE;

	nexthop = rtnl_route_nexthop_n (nlo, 0);
	if (!nexthop)
		g_return_val_if_reached (FALSE);
	dst_addr = rtnl_route_get_dst (nlo);
	if (!dst_addr)
		g_return_val_if_reached (FALSE);

	if (nl_addr_get_len (dst_addr)) {
		if (nl_addr_get_len (dst_addr) != sizeof (obj->network))
			g_return_val_if_reached (FALSE);
		memcpy (&obj->network, nl_addr_get_binary_addr (dst_addr), sizeof (obj->network));
	}
	obj->ifindex = rtnl_route_nh_get_ifindex (nexthop);
	obj->plen = nl_addr_get_prefixlen (dst_addr);
	obj->metric = rtnl_route_get_priority (nlo);

	if (id_only)
		return TRUE;

	gw_addr = rtnl_route_nh_get_gateway (nexthop);
	if (gw_addr) {
		if (nl_addr_get_len (gw_addr) != sizeof (obj->gateway))
			g_warn_if_reached ();
		else
			memcpy (&obj->gateway, nl_addr_get_binary_addr (gw_addr), sizeof (obj->gateway));
	}

	rtnl_route_get_metric (nlo, RTAX_ADVMSS, &obj->mss);

	if (rtnl_route_get_flags (nlo) & RTM_F_CLONED)
		obj->source = _NM_IP_CONFIG_SOURCE_RTM_F_CLONED;
	else
		obj->source = _nm_ip_config_source_from_rtprot (rtnl_route_get_protocol (nlo));

	return TRUE;
}
2013-03-27 22:23:24 +01:00
/******************************************************************/
2015-04-06 18:29:36 +02:00
/* Translate a cache operation into the corresponding platform signal and
 * emit it. Visibility transitions are mapped: an update that makes an object
 * visible becomes ADDED, one that hides it becomes REMOVED, and operations
 * on invisible objects are swallowed. */
static void
do_emit_signal (NMPlatform *platform, const NMPObject *obj, NMPCacheOpsType cache_op, gboolean was_visible, NMPlatformReason reason)
{
	gboolean is_visible;
	NMPObject obj_clone;
	const NMPClass *klass;

	nm_assert (NM_IN_SET ((NMPlatformSignalChangeType) cache_op, (NMPlatformSignalChangeType) NMP_CACHE_OPS_UNCHANGED, NM_PLATFORM_SIGNAL_ADDED, NM_PLATFORM_SIGNAL_CHANGED, NM_PLATFORM_SIGNAL_REMOVED));
	nm_assert (obj || cache_op == NMP_CACHE_OPS_UNCHANGED);
	/* for non-removals @obj must be the cached instance; for removals it must not be. */
	nm_assert (!obj || cache_op == NMP_CACHE_OPS_REMOVED || obj == nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, obj));
	nm_assert (!obj || cache_op != NMP_CACHE_OPS_REMOVED || obj != nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, obj));

	switch (cache_op) {
	case NMP_CACHE_OPS_ADDED:
		if (!nmp_object_is_visible (obj))
			return;
		break;
	case NMP_CACHE_OPS_UPDATED:
		is_visible = nmp_object_is_visible (obj);
		if (!was_visible && is_visible)
			cache_op = NMP_CACHE_OPS_ADDED;
		else if (was_visible && !is_visible) {
			/* This is a bit ugly. The object was visible and changed in a way that it became invisible.
			 * We raise a removed signal, but contrary to a real 'remove', @obj is already changed to be
			 * different from what it was when the user saw it the last time.
			 *
			 * The more correct solution would be to have cache_pre_hook() create a clone of the original
			 * value before it was changed to become invisible.
			 *
			 * But, don't bother. Probably nobody depends on the original values and only cares about the
			 * id properties (which are still correct).
			 */
			cache_op = NMP_CACHE_OPS_REMOVED;
		} else if (!is_visible)
			return;
		break;
	case NMP_CACHE_OPS_REMOVED:
		if (!was_visible)
			return;
		break;
	default:
		g_assert (cache_op == NMP_CACHE_OPS_UNCHANGED);
		return;
	}

	klass = NMP_OBJECT_GET_CLASS (obj);

	_LOGT ("emit signal %s %s: %s (%ld)",
	       klass->signal_type,
	       nm_platform_signal_change_type_to_string ((NMPlatformSignalChangeType) cache_op),
	       nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_PUBLIC, NULL, 0),
	       (long) reason);

	/* don't expose @obj directly, but clone the public fields. A signal handler might
	 * call back into NMPlatform which could invalidate (or modify) @obj. */
	memcpy (&obj_clone.object, &obj->object, klass->sizeof_public);
	g_signal_emit_by_name (platform, klass->signal_type, klass->obj_type, obj_clone.object.ifindex, &obj_clone.object, (NMPlatformSignalChangeType) cache_op, reason);
}
/******************************************************************/
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static DelayedActionType
2015-06-19 16:24:18 +02:00
delayed_action_refresh_from_object_type ( NMPObjectType obj_type )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
switch ( obj_type ) {
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_LINK : return DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS ;
case NMP_OBJECT_TYPE_IP4_ADDRESS : return DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES ;
case NMP_OBJECT_TYPE_IP6_ADDRESS : return DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES ;
case NMP_OBJECT_TYPE_IP4_ROUTE : return DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES ;
case NMP_OBJECT_TYPE_IP6_ROUTE : return DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
default : g_return_val_if_reached ( DELAYED_ACTION_TYPE_NONE ) ;
}
}
2015-06-19 16:24:18 +02:00
static NMPObjectType
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_refresh_to_object_type ( DelayedActionType action_type )
{
switch ( action_type ) {
2015-06-19 16:24:18 +02:00
case DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS : return NMP_OBJECT_TYPE_LINK ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES : return NMP_OBJECT_TYPE_IP4_ADDRESS ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES : return NMP_OBJECT_TYPE_IP6_ADDRESS ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES : return NMP_OBJECT_TYPE_IP4_ROUTE ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES : return NMP_OBJECT_TYPE_IP6_ROUTE ;
2015-06-24 15:29:01 +02:00
default : g_return_val_if_reached ( NMP_OBJECT_TYPE_UNKNOWN ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
}
}
2015-04-06 18:29:36 +02:00
static const char *
delayed_action_to_string ( DelayedActionType action_type )
{
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
switch ( action_type ) {
case DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS : return " refresh-all-links " ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES : return " refresh-all-ip4-addresses " ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES : return " refresh-all-ip6-addresses " ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES : return " refresh-all-ip4-routes " ;
case DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES : return " refresh-all-ip6-routes " ;
case DELAYED_ACTION_TYPE_REFRESH_LINK : return " refresh-link " ;
case DELAYED_ACTION_TYPE_MASTER_CONNECTED : return " master-connected " ;
case DELAYED_ACTION_TYPE_READ_NETLINK : return " read-netlink " ;
default :
return " unknown " ;
}
2015-04-06 18:29:36 +02:00
}
/* Trace-log one delayed action: the operation performed on it ("handle",
 * "schedule", ...), the action's symbolic name and numeric value, and the
 * action-specific argument (shown both as pointer and as int, because
 * ifindex arguments are packed via GPOINTER_TO_INT). The `"" operation`
 * concatenation forces a compile error unless @operation is a string
 * literal. */
#define _LOGT_delayed_action(action_type, arg, operation) \
	_LOGT ("delayed-action: %s %s (%d) [%p / %d]", "" operation, delayed_action_to_string (action_type), (int) action_type, arg, GPOINTER_TO_INT (arg))
2015-04-06 18:29:36 +02:00
static void
delayed_action_handle_MASTER_CONNECTED ( NMPlatform * platform , int master_ifindex )
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
auto_nmp_obj NMPObject * obj_cache = NULL ;
gboolean was_visible ;
NMPCacheOpsType cache_op ;
cache_op = nmp_cache_update_link_master_connected ( priv - > cache , master_ifindex , & obj_cache , & was_visible , cache_pre_hook , platform ) ;
do_emit_signal ( platform , obj_cache , cache_op , was_visible , NM_PLATFORM_REASON_INTERNAL ) ;
}
static void
delayed_action_handle_REFRESH_LINK (NMPlatform *platform, int ifindex)
{
	/* Refresh a single link: request a fresh RTM_GETLINK dump for @ifindex.
	 * The reply arrives on the event socket and updates the cache; FALSE
	 * means we do not handle other delayed actions while waiting. */
	do_request_link (platform, ifindex, NULL, FALSE);
}
static void
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_REFRESH_ALL ( NMPlatform * platform , DelayedActionType flags )
2015-04-06 18:29:36 +02:00
{
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
do_request_all ( platform , flags , FALSE ) ;
2015-04-06 18:29:36 +02:00
}
static void
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_READ_NETLINK ( NMPlatform * platform )
2015-04-06 18:29:36 +02:00
{
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
event_handler_read_netlink_all ( platform , TRUE ) ;
2015-04-06 18:29:36 +02:00
}
static gboolean
delayed_action_handle_one (NMPlatform *platform)
{
	/* Pop and execute exactly one pending delayed action, in priority order:
	 *   1. MASTER_CONNECTED  - cache-internal inconsistency, fix immediately;
	 *   2. READ_NETLINK      - drain the limited-size event socket buffer;
	 *   3. REFRESH_ALL_*     - all pending full dumps, handled as one batch;
	 *   4. REFRESH_LINK      - per-link refresh, one ifindex at a time.
	 *
	 * Returns TRUE if an action was handled, FALSE if nothing was pending
	 * (in which case the idle source is also cleared). */
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	gpointer user_data;

	if (priv->delayed_action.flags == DELAYED_ACTION_TYPE_NONE) {
		nm_clear_g_source (&priv->delayed_action.idle_id);
		return FALSE;
	}

	/* First process DELAYED_ACTION_TYPE_MASTER_CONNECTED actions.
	 * This type of action is entirely cache-internal and is here to resolve a
	 * cache inconsistency. It should be fixed right away. */
	if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_MASTER_CONNECTED)) {
		nm_assert (priv->delayed_action.list_master_connected->len > 0);

		user_data = priv->delayed_action.list_master_connected->pdata[0];
		g_ptr_array_remove_index_fast (priv->delayed_action.list_master_connected, 0);
		if (priv->delayed_action.list_master_connected->len == 0)
			priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_MASTER_CONNECTED;
		/* the list must not contain duplicates; each ifindex is queued at most once */
		nm_assert (_nm_utils_ptrarray_find_first (priv->delayed_action.list_master_connected->pdata, priv->delayed_action.list_master_connected->len, user_data) < 0);

		_LOGT_delayed_action (DELAYED_ACTION_TYPE_MASTER_CONNECTED, user_data, "handle");
		delayed_action_handle_MASTER_CONNECTED (platform, GPOINTER_TO_INT (user_data));
		return TRUE;
	}
	nm_assert (priv->delayed_action.list_master_connected->len == 0);

	/* Next we prefer read-netlink, because the buffer size is limited and we want to process events
	 * from netlink early. */
	if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_READ_NETLINK)) {
		_LOGT_delayed_action (DELAYED_ACTION_TYPE_READ_NETLINK, NULL, "handle");
		priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_READ_NETLINK;
		delayed_action_handle_READ_NETLINK (platform);
		return TRUE;
	}

	if (NM_FLAGS_ANY (priv->delayed_action.flags, DELAYED_ACTION_TYPE_REFRESH_ALL)) {
		DelayedActionType flags, iflags;

		/* handle all pending refresh-all requests in one batch; clear the
		 * bits before dispatching so re-scheduling during the dump works. */
		flags = priv->delayed_action.flags & DELAYED_ACTION_TYPE_REFRESH_ALL;
		priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_ALL;

		if (_LOGT_ENABLED ()) {
			for (iflags = (DelayedActionType) 0x1LL; iflags <= DELAYED_ACTION_TYPE_MAX; iflags <<= 1) {
				if (NM_FLAGS_HAS (flags, iflags))
					_LOGT_delayed_action (iflags, NULL, "handle");
			}
		}

		delayed_action_handle_REFRESH_ALL (platform, flags);
		return TRUE;
	}

	/* only REFRESH_LINK can remain at this point */
	nm_assert (priv->delayed_action.flags == DELAYED_ACTION_TYPE_REFRESH_LINK);
	nm_assert (priv->delayed_action.list_refresh_link->len > 0);

	user_data = priv->delayed_action.list_refresh_link->pdata[0];
	g_ptr_array_remove_index_fast (priv->delayed_action.list_refresh_link, 0);
	/* bugfix: must test list_refresh_link here (the original tested
	 * list_master_connected, which is always empty at this point and thus
	 * cleared the REFRESH_LINK flag even with entries still queued). */
	if (priv->delayed_action.list_refresh_link->len == 0)
		priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_LINK;
	nm_assert (_nm_utils_ptrarray_find_first (priv->delayed_action.list_refresh_link->pdata, priv->delayed_action.list_refresh_link->len, user_data) < 0);

	_LOGT_delayed_action (DELAYED_ACTION_TYPE_REFRESH_LINK, user_data, "handle");
	delayed_action_handle_REFRESH_LINK (platform, GPOINTER_TO_INT (user_data));
	return TRUE;
}
static gboolean
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_all ( NMPlatform * platform , gboolean read_netlink )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
gboolean any = FALSE ;
nm_clear_g_source ( & priv - > delayed_action . idle_id ) ;
priv - > delayed_action . is_handling + + ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( read_netlink )
delayed_action_schedule ( platform , DELAYED_ACTION_TYPE_READ_NETLINK , NULL ) ;
2015-04-06 18:29:36 +02:00
while ( delayed_action_handle_one ( platform ) )
any = TRUE ;
priv - > delayed_action . is_handling - - ;
return any ;
}
/* GSource idle callback: clear the idle-source id and handle all pending
 * delayed actions (without reading netlink — events are handled separately). */
static gboolean
delayed_action_handle_idle (gpointer user_data)
{
	/* Reset the id first so delayed_action_schedule() can re-arm the
	 * idle handler if new actions get scheduled while we handle. */
	NM_LINUX_PLATFORM_GET_PRIVATE (user_data)->delayed_action.idle_id = 0;
	delayed_action_handle_all (user_data, FALSE);
	return G_SOURCE_REMOVE;
}
/* Schedule one or more delayed actions.
 *
 * @action_type: flags of DELAYED_ACTION_TYPE_* to schedule.  For
 *   REFRESH_LINK and MASTER_CONNECTED the type must be a single flag
 *   and @user_data carries the ifindex (as GINT_TO_POINTER); for all
 *   other types @user_data must be NULL.
 *
 * If no handling is currently in progress, arms an idle handler to
 * process the actions on the next main-loop iteration. */
static void
delayed_action_schedule (NMPlatform *platform, DelayedActionType action_type, gpointer user_data)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	DelayedActionType iflags;

	nm_assert (action_type != DELAYED_ACTION_TYPE_NONE);

	if (NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_REFRESH_LINK)) {
		nm_assert (nm_utils_is_power_of_two (action_type));
		/* avoid duplicate ifindex entries in the per-link refresh list */
		if (_nm_utils_ptrarray_find_first (priv->delayed_action.list_refresh_link->pdata, priv->delayed_action.list_refresh_link->len, user_data) < 0)
			g_ptr_array_add (priv->delayed_action.list_refresh_link, user_data);
	} else if (NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_MASTER_CONNECTED)) {
		nm_assert (nm_utils_is_power_of_two (action_type));
		if (_nm_utils_ptrarray_find_first (priv->delayed_action.list_master_connected->pdata, priv->delayed_action.list_master_connected->len, user_data) < 0)
			g_ptr_array_add (priv->delayed_action.list_master_connected, user_data);
	} else
		nm_assert (!user_data);

	priv->delayed_action.flags |= action_type;

	if (_LOGT_ENABLED ()) {
		/* log each scheduled flag individually */
		for (iflags = (DelayedActionType) 0x1LL; iflags <= DELAYED_ACTION_TYPE_MAX; iflags <<= 1) {
			if (NM_FLAGS_HAS (action_type, iflags))
				_LOGT_delayed_action (iflags, user_data, "schedule");
		}
	}

	/* only arm the idle handler when nobody is handling right now and
	 * it isn't armed already. */
	if (priv->delayed_action.is_handling == 0 && priv->delayed_action.idle_id == 0)
		priv->delayed_action.idle_id = g_idle_add (delayed_action_handle_idle, platform);
}
/******************************************************************/
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static void
2015-06-19 16:24:18 +02:00
cache_prune_candidates_record_all ( NMPlatform * platform , NMPObjectType obj_type )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
priv - > prune_candidates = nmp_cache_lookup_all_to_hash ( priv - > cache ,
2015-06-18 11:44:36 +02:00
nmp_cache_id_init_object_type ( NMP_CACHE_ID_STATIC , obj_type , FALSE ) ,
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > prune_candidates ) ;
_LOGT ( " cache-prune: record %s (now %u candidates) " , nmp_class_from_type ( obj_type ) - > obj_type_name ,
priv - > prune_candidates ? g_hash_table_size ( priv - > prune_candidates ) : 0 ) ;
}
/* Record a single object as a prune candidate (no-op for NULL).
 * Creates the candidate hash lazily; the hash owns a reference. */
static void
cache_prune_candidates_record_one (NMPlatform *platform, NMPObject *obj)
{
	NMLinuxPlatformPrivate *priv;

	if (!obj)
		return;

	priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);

	if (!priv->prune_candidates)
		priv->prune_candidates = g_hash_table_new_full (NULL, NULL, (GDestroyNotify) nmp_object_unref, NULL);

	/* only log on first insertion of this object */
	if (_LOGT_ENABLED () && !g_hash_table_contains (priv->prune_candidates, obj))
		_LOGT ("cache-prune: record-one: %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ALL, NULL, 0));
	g_hash_table_add (priv->prune_candidates, nmp_object_ref (obj));
}
/* Remove @obj from the prune-candidate set (no-op for NULL or when no
 * candidates are recorded).  Called when an object is seen again and
 * hence must not be pruned. */
static void
cache_prune_candidates_drop (NMPlatform *platform, const NMPObject *obj)
{
	NMLinuxPlatformPrivate *priv;

	if (!obj)
		return;

	priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	if (priv->prune_candidates) {
		if (_LOGT_ENABLED () && g_hash_table_contains (priv->prune_candidates, obj))
			_LOGT ("cache-prune: drop-one: %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ALL, NULL, 0));
		g_hash_table_remove (priv->prune_candidates, obj);
	}
}
/* Prune all remaining candidates from the cache and emit signals.
 *
 * Steals the candidate hash first (setting priv->prune_candidates to
 * NULL) so that cache_pre_hook callbacks triggered by the removals
 * cannot re-enter and see a half-consumed set. */
static void
cache_prune_candidates_prune (NMPlatform *platform)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	GHashTable *prune_candidates;
	GHashTableIter iter;
	const NMPObject *obj;
	gboolean was_visible;
	NMPCacheOpsType cache_op;

	if (!priv->prune_candidates)
		return;

	prune_candidates = priv->prune_candidates;
	priv->prune_candidates = NULL;

	g_hash_table_iter_init (&iter, prune_candidates);
	while (g_hash_table_iter_next (&iter, (gpointer *) &obj, NULL)) {
		auto_nmp_obj NMPObject *obj_cache = NULL;

		_LOGT ("cache-prune: prune %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ALL, NULL, 0));
		cache_op = nmp_cache_remove (priv->cache, obj, TRUE, &obj_cache, &was_visible, cache_pre_hook, platform);
		do_emit_signal (platform, obj_cache, cache_op, was_visible, NM_PLATFORM_REASON_INTERNAL);
	}

	g_hash_table_unref (prune_candidates);
}
/* Process objects queued for delayed deletion.
 *
 * Copies the non-NULL entries into a temporary list first and clears the
 * hash before deleting, because cache_remove_netlink() may trigger hooks
 * that modify priv->delayed_deletion while we iterate. */
static void
cache_delayed_deletion_prune (NMPlatform *platform)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	GPtrArray *prune_list = NULL;
	GHashTableIter iter;
	guint i;
	NMPObject *obj;

	if (g_hash_table_size (priv->delayed_deletion) == 0)
		return;

	g_hash_table_iter_init (&iter, priv->delayed_deletion);
	while (g_hash_table_iter_next (&iter, NULL, (gpointer *) &obj)) {
		if (obj) {
			if (!prune_list)
				prune_list = g_ptr_array_new_full (g_hash_table_size (priv->delayed_deletion), (GDestroyNotify) nmp_object_unref);
			g_ptr_array_add (prune_list, nmp_object_ref (obj));
		}
	}

	g_hash_table_remove_all (priv->delayed_deletion);

	if (prune_list) {
		for (i = 0; i < prune_list->len; i++) {
			obj = prune_list->pdata[i];
			_LOGT ("delayed-deletion: delete %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ID, NULL, 0));
			cache_remove_netlink (platform, obj, NULL, NULL, NM_PLATFORM_REASON_EXTERNAL);
		}
		g_ptr_array_unref (prune_list);
	}
}
2015-04-06 18:29:36 +02:00
/* Cache pre-change hook, invoked by the NMPCache right before an object
 * is added, updated or removed.
 *
 * Logs the transition and schedules follow-up delayed actions that the
 * change implies (toggling a master's connected state, refreshing
 * addresses/routes when a link disappears or goes down, refreshing the
 * master on enslave/release, refreshing routes on address removal). */
static void
cache_pre_hook (NMPCache *cache, const NMPObject *old, const NMPObject *new, NMPCacheOpsType ops_type, gpointer user_data)
{
	NMPlatform *platform = NM_PLATFORM (user_data);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	const NMPClass *klass;
	char str_buf[sizeof (_nm_platform_to_string_buffer)];
	char str_buf2[sizeof (_nm_platform_to_string_buffer)];

	nm_assert (old || new);
	nm_assert (NM_IN_SET (ops_type, NMP_CACHE_OPS_ADDED, NMP_CACHE_OPS_REMOVED, NMP_CACHE_OPS_UPDATED));
	nm_assert (ops_type != NMP_CACHE_OPS_ADDED || (old == NULL && NMP_OBJECT_IS_VALID (new) && nmp_object_is_alive (new)));
	nm_assert (ops_type != NMP_CACHE_OPS_REMOVED || (new == NULL && NMP_OBJECT_IS_VALID (old) && nmp_object_is_alive (old)));
	nm_assert (ops_type != NMP_CACHE_OPS_UPDATED || (NMP_OBJECT_IS_VALID (old) && nmp_object_is_alive (old) && NMP_OBJECT_IS_VALID (new) && nmp_object_is_alive (new)));
	nm_assert (new == NULL || old == NULL || nmp_object_id_equal (new, old));

	klass = old ? NMP_OBJECT_GET_CLASS (old) : NMP_OBJECT_GET_CLASS (new);

	nm_assert (klass == (new ? NMP_OBJECT_GET_CLASS (new) : NMP_OBJECT_GET_CLASS (old)));

	_LOGT ("update-cache-%s: %s: %s%s%s",
	       klass->obj_type_name,
	       (ops_type == NMP_CACHE_OPS_UPDATED
	           ? "UPDATE"
	           : (ops_type == NMP_CACHE_OPS_REMOVED
	                 ? "REMOVE"
	                 : (ops_type == NMP_CACHE_OPS_ADDED) ? "ADD" : "???")),
	       (ops_type != NMP_CACHE_OPS_ADDED
	           ? nmp_object_to_string (old, NMP_OBJECT_TO_STRING_ALL, str_buf2, sizeof (str_buf2))
	           : nmp_object_to_string (new, NMP_OBJECT_TO_STRING_ALL, str_buf2, sizeof (str_buf2))),
	       (ops_type == NMP_CACHE_OPS_UPDATED) ? " -> " : "",
	       (ops_type == NMP_CACHE_OPS_UPDATED
	           ? nmp_object_to_string (new, NMP_OBJECT_TO_STRING_ALL, str_buf, sizeof (str_buf))
	           : ""));

	switch (klass->obj_type) {
	case NMP_OBJECT_TYPE_LINK:
		{
			/* check whether changing a slave link can cause a master link (bridge or bond) to go up/down */
			if (   old
			    && nmp_cache_link_connected_needs_toggle_by_ifindex (priv->cache, old->link.master, new, old))
				delayed_action_schedule (platform, DELAYED_ACTION_TYPE_MASTER_CONNECTED, GINT_TO_POINTER (old->link.master));
			if (   new
			    && (!old || old->link.master != new->link.master)
			    && nmp_cache_link_connected_needs_toggle_by_ifindex (priv->cache, new->link.master, new, old))
				delayed_action_schedule (platform, DELAYED_ACTION_TYPE_MASTER_CONNECTED, GINT_TO_POINTER (new->link.master));
		}
		{
			/* check whether we are about to change a master link that needs toggling connected state. */
			if (   new /* <-- nonsensical, make coverity happy */
			    && nmp_cache_link_connected_needs_toggle (cache, new, new, old))
				delayed_action_schedule (platform, DELAYED_ACTION_TYPE_MASTER_CONNECTED, GINT_TO_POINTER (new->link.ifindex));
		}
		{
			int ifindex = 0;

			/* if we remove a link (from netlink), we must refresh the addresses and routes */
			if (   ops_type == NMP_CACHE_OPS_REMOVED
			    && old /* <-- nonsensical, make coverity happy */)
				ifindex = old->link.ifindex;
			else if (   ops_type == NMP_CACHE_OPS_UPDATED
			         && old && new /* <-- nonsensical, make coverity happy */
			         && !new->_link.netlink.is_in_netlink
			         && new->_link.netlink.is_in_netlink != old->_link.netlink.is_in_netlink)
				ifindex = new->link.ifindex;

			if (ifindex > 0) {
				delayed_action_schedule (platform,
				                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
				                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
				                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
				                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
				                         NULL);
			}
		}
		{
			/* if a link goes down, we must refresh routes */
			if (   ops_type == NMP_CACHE_OPS_UPDATED
			    && old && new /* <-- nonsensical, make coverity happy */
			    && old->_link.netlink.is_in_netlink
			    && NM_FLAGS_HAS (old->link.flags, IFF_LOWER_UP)
			    && new->_link.netlink.is_in_netlink
			    && !NM_FLAGS_HAS (new->link.flags, IFF_LOWER_UP)) {
				delayed_action_schedule (platform,
				                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
				                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
				                         NULL);
			}
		}
		{
			/* on enslave/release, we also refresh the master. */
			int ifindex1 = 0, ifindex2 = 0;
			gboolean changed_master, changed_connected;

			/* 2 is a sentinel distinct from both TRUE and FALSE, so a
			 * link (dis)appearing from netlink also counts as a change. */
			changed_master =    (new && new->_link.netlink.is_in_netlink && new->link.master > 0 ? new->link.master : 0)
			                 != (old && old->_link.netlink.is_in_netlink && old->link.master > 0 ? old->link.master : 0);
			changed_connected =    (new && new->_link.netlink.is_in_netlink ? NM_FLAGS_HAS (new->link.flags, IFF_LOWER_UP) : 2)
			                    != (old && old->_link.netlink.is_in_netlink ? NM_FLAGS_HAS (old->link.flags, IFF_LOWER_UP) : 2);

			if (changed_master || changed_connected) {
				ifindex1 = (old && old->_link.netlink.is_in_netlink && old->link.master > 0) ? old->link.master : 0;
				ifindex2 = (new && new->_link.netlink.is_in_netlink && new->link.master > 0) ? new->link.master : 0;

				if (ifindex1 > 0)
					delayed_action_schedule (platform, DELAYED_ACTION_TYPE_REFRESH_LINK, GINT_TO_POINTER (ifindex1));
				if (ifindex2 > 0 && ifindex1 != ifindex2)
					delayed_action_schedule (platform, DELAYED_ACTION_TYPE_REFRESH_LINK, GINT_TO_POINTER (ifindex2));
			}
		}
		break;
	case NMP_OBJECT_TYPE_IP4_ADDRESS:
	case NMP_OBJECT_TYPE_IP6_ADDRESS:
		{
			/* Address deletion is sometimes accompanied by route deletion. We need to
			 * check all routes belonging to the same interface. */
			if (ops_type == NMP_CACHE_OPS_REMOVED) {
				delayed_action_schedule (platform,
				                         (klass->obj_type == NMP_OBJECT_TYPE_IP4_ADDRESS)
				                             ? DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES
				                             : DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
				                         NULL);
			}
		}
		/* fall through */
	default:
		break;
	}
}
/* Remove @obj_needle from the cache (netlink origin) and emit the signal.
 *
 * @out_obj_cache: if non-NULL, receives ownership of the removed cache
 *   object (caller must unref); otherwise it is unreffed here.
 * @out_was_visible: if non-NULL, receives whether the object was visible.
 *
 * Returns: the cache operation that took place. */
static NMPCacheOpsType
cache_remove_netlink (NMPlatform *platform, const NMPObject *obj_needle, NMPObject **out_obj_cache, gboolean *out_was_visible, NMPlatformReason reason)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	NMPObject *obj_cache;
	gboolean was_visible;
	NMPCacheOpsType cache_op;

	cache_op = nmp_cache_remove_netlink (priv->cache, obj_needle, &obj_cache, &was_visible, cache_pre_hook, platform);
	do_emit_signal (platform, obj_cache, cache_op, was_visible, NM_PLATFORM_REASON_INTERNAL);

	if (out_obj_cache)
		*out_obj_cache = obj_cache;
	else
		nmp_object_unref (obj_cache);
	if (out_was_visible)
		*out_was_visible = was_visible;

	return cache_op;
}
/* Update @obj in the cache (netlink origin) and emit the signal.
 *
 * Convenience wrapper around nmp_cache_update_netlink() + do_emit_signal().
 * Ownership of the resulting cache object transfers to @out_obj_cache if
 * given, otherwise it is released here.
 *
 * Returns: the cache operation that took place. */
static NMPCacheOpsType
cache_update_netlink (NMPlatform *platform, NMPObject *obj, NMPObject **out_obj_cache, gboolean *out_was_visible, NMPlatformReason reason)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	NMPObject *obj_cache;
	gboolean was_visible;
	NMPCacheOpsType cache_op;

	/* This is basically a convenience method to call nmp_cache_update() and do_emit_signal()
	 * at once. */

	cache_op = nmp_cache_update_netlink (priv->cache, obj, &obj_cache, &was_visible, cache_pre_hook, platform);
	do_emit_signal (platform, obj_cache, cache_op, was_visible, reason);

	if (out_obj_cache)
		*out_obj_cache = obj_cache;
	else
		nmp_object_unref (obj_cache);
	if (out_was_visible)
		*out_was_visible = was_visible;

	return cache_op;
}
2015-04-25 14:53:39 +02:00
/******************************************************************/
2015-04-06 18:29:36 +02:00
static void
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
_new_sequence_number ( NMPlatform * platform , guint32 seq )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
_LOGT ( " _new_sequence_number(): new sequence number %u " , seq ) ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > nlh_seq_expect = seq ;
2015-04-06 18:29:36 +02:00
}
static void
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
do_request_link ( NMPlatform * platform , int ifindex , const char * name , gboolean handle_delayed_action )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
guint32 seq ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
_LOGT ( " do_request_link (%d,%s) " , ifindex , name ? name : " " ) ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( ifindex > 0 ) {
NMPObject * obj ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
cache_prune_candidates_record_one ( platform ,
( NMPObject * ) nmp_cache_lookup_link ( priv - > cache , ifindex ) ) ;
obj = nmp_object_new_link ( ifindex ) ;
_LOGT ( " delayed-deletion: protect object %s " , nmp_object_to_string ( obj , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
g_hash_table_insert ( priv - > delayed_deletion , obj , NULL ) ;
2015-04-06 18:29:36 +02:00
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
event_handler_read_netlink_all ( platform , FALSE ) ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( _nl_sock_request_link ( platform , priv - > nlh_event , ifindex , name , & seq ) = = 0 )
_new_sequence_number ( platform , seq ) ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
event_handler_read_netlink_all ( platform , TRUE ) ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
cache_delayed_deletion_prune ( platform ) ;
cache_prune_candidates_prune ( platform ) ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( handle_delayed_action )
delayed_action_handle_all ( platform , FALSE ) ;
}
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
static void
2015-06-19 16:24:18 +02:00
do_request_one_type ( NMPlatform * platform , NMPObjectType obj_type , gboolean handle_delayed_action )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
do_request_all ( platform , delayed_action_refresh_from_object_type ( obj_type ) , handle_delayed_action ) ;
}
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
/* do_request_all:
 * @platform: the platform instance
 * @action_type: a mask of DELAYED_ACTION_TYPE_REFRESH_ALL_* flags selecting
 *   which object types to re-dump from kernel
 * @handle_delayed_action: whether to process all pending delayed actions
 *   after the dumps completed
 *
 * Request a full NLM_F_DUMP of every selected object type via the event
 * socket, so that the dump results are processed in-order with asynchronous
 * change notifications. Cached objects of the selected types that are not
 * re-announced by the dump get pruned afterwards.
 */
static void
do_request_all (NMPlatform *platform, DelayedActionType action_type, gboolean handle_delayed_action)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	guint32 seq;
	DelayedActionType iflags;

	nm_assert (!NM_FLAGS_ANY (action_type, ~DELAYED_ACTION_TYPE_REFRESH_ALL));
	action_type &= DELAYED_ACTION_TYPE_REFRESH_ALL;

	/* First pass: record all currently cached objects of the requested types
	 * as prune candidates; whatever the dump does not re-announce is gone. */
	for (iflags = (DelayedActionType) 0x1LL; iflags <= DELAYED_ACTION_TYPE_MAX; iflags <<= 1) {
		if (NM_FLAGS_HAS (action_type, iflags))
			cache_prune_candidates_record_all (platform, delayed_action_refresh_to_object_type (iflags));
	}

	/* Second pass: issue one dump request per selected type. */
	for (iflags = (DelayedActionType) 0x1LL; iflags <= DELAYED_ACTION_TYPE_MAX; iflags <<= 1) {
		if (NM_FLAGS_HAS (action_type, iflags)) {
			NMPObjectType obj_type = delayed_action_refresh_to_object_type (iflags);

			/* clear any delayed action that request a refresh of this object type. */
			priv->delayed_action.flags &= ~iflags;
			if (obj_type == NMP_OBJECT_TYPE_LINK) {
				/* A full link dump supersedes any pending per-link refreshes. */
				priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_LINK;
				g_ptr_array_set_size (priv->delayed_action.list_refresh_link, 0);
			}

			/* Drain pending events before sending the request so the dump
			 * reply is not interleaved with stale notifications. */
			event_handler_read_netlink_all (platform, FALSE);

			if (_nl_sock_request_all (platform, priv->nlh_event, obj_type, &seq) == 0)
				_new_sequence_number (platform, seq);
		}
	}

	/* Block until all dumps triggered above have been fully received. */
	event_handler_read_netlink_all (platform, TRUE);

	cache_prune_candidates_prune (platform);

	if (handle_delayed_action)
		delayed_action_handle_all (platform, FALSE);
}
static gboolean
2015-06-19 16:24:18 +02:00
kernel_add_object ( NMPlatform * platform , NMPObjectType obj_type , const struct nl_object * nlo )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
int nle ;
g_return_val_if_fail ( nlo , FALSE ) ;
switch ( obj_type ) {
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_LINK :
2015-04-06 18:29:36 +02:00
nle = rtnl_link_add ( priv - > nlh , ( struct rtnl_link * ) nlo , NLM_F_CREATE ) ;
break ;
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_IP4_ADDRESS :
case NMP_OBJECT_TYPE_IP6_ADDRESS :
2015-04-06 18:29:36 +02:00
nle = rtnl_addr_add ( priv - > nlh , ( struct rtnl_addr * ) nlo , NLM_F_CREATE | NLM_F_REPLACE ) ;
break ;
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_IP4_ROUTE :
case NMP_OBJECT_TYPE_IP6_ROUTE :
2015-04-06 18:29:36 +02:00
nle = rtnl_route_add ( priv - > nlh , ( struct rtnl_route * ) nlo , NLM_F_CREATE | NLM_F_REPLACE ) ;
break ;
default :
g_return_val_if_reached ( - NLE_INVAL ) ;
}
_LOGT ( " kernel-add-%s: returned %s (%d) " ,
nmp_class_from_type ( obj_type ) - > obj_type_name , nl_geterror ( nle ) , - nle ) ;
switch ( nle ) {
case - NLE_SUCCESS :
return - NLE_SUCCESS ;
case - NLE_EXIST :
/* NLE_EXIST is considered equivalent to success to avoid race conditions. You
* never know when something sends an identical object just before
* NetworkManager . */
2015-06-19 16:24:18 +02:00
if ( obj_type ! = NMP_OBJECT_TYPE_LINK )
2015-04-06 18:29:36 +02:00
return - NLE_SUCCESS ;
/* fall-through */
default :
return nle ;
}
}
static int
2015-06-19 16:24:18 +02:00
kernel_delete_object ( NMPlatform * platform , NMPObjectType object_type , const struct nl_object * object )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
int nle ;
switch ( object_type ) {
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_LINK :
2015-04-06 18:29:36 +02:00
nle = rtnl_link_delete ( priv - > nlh , ( struct rtnl_link * ) object ) ;
break ;
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_IP4_ADDRESS :
case NMP_OBJECT_TYPE_IP6_ADDRESS :
2015-04-06 18:29:36 +02:00
nle = rtnl_addr_delete ( priv - > nlh , ( struct rtnl_addr * ) object , 0 ) ;
break ;
2015-06-19 16:24:18 +02:00
case NMP_OBJECT_TYPE_IP4_ROUTE :
case NMP_OBJECT_TYPE_IP6_ROUTE :
2015-04-06 18:29:36 +02:00
nle = rtnl_route_delete ( priv - > nlh , ( struct rtnl_route * ) object , 0 ) ;
break ;
default :
g_assert_not_reached ( ) ;
}
switch ( nle ) {
case - NLE_SUCCESS :
return NLE_SUCCESS ;
case - NLE_OBJ_NOTFOUND :
_LOGT ( " kernel-delete-%s: failed with \" %s \" (%d), meaning the object was already removed " ,
nmp_class_from_type ( object_type ) - > obj_type_name , nl_geterror ( nle ) , - nle ) ;
return - NLE_SUCCESS ;
case - NLE_FAILURE :
2015-06-19 16:24:18 +02:00
if ( object_type = = NMP_OBJECT_TYPE_IP6_ADDRESS ) {
2015-04-06 18:29:36 +02:00
/* On RHEL7 kernel, deleting a non existing address fails with ENXIO (which libnl maps to NLE_FAILURE) */
_LOGT ( " kernel-delete-%s: deleting address failed with \" %s \" (%d), meaning the address was already removed " ,
nmp_class_from_type ( object_type ) - > obj_type_name , nl_geterror ( nle ) , - nle ) ;
return NLE_SUCCESS ;
}
break ;
case - NLE_NOADDR :
2015-06-19 16:24:18 +02:00
if ( object_type = = NMP_OBJECT_TYPE_IP4_ADDRESS | | object_type = = NMP_OBJECT_TYPE_IP6_ADDRESS ) {
2015-04-06 18:29:36 +02:00
_LOGT ( " kernel-delete-%s: deleting address failed with \" %s \" (%d), meaning the address was already removed " ,
nmp_class_from_type ( object_type ) - > obj_type_name , nl_geterror ( nle ) , - nle ) ;
return - NLE_SUCCESS ;
}
break ;
default :
break ;
}
_LOGT ( " kernel-delete-%s: failed with %s (%d) " ,
nmp_class_from_type ( object_type ) - > obj_type_name , nl_geterror ( nle ) , - nle ) ;
return nle ;
}
/* Change link attributes via an RTM_NEWLINK (falling back to RTM_SETLINK)
 * request on the synchronous 'nlh' socket.
 *
 * @platform: the platform instance
 * @nlo: the link to change; the caller must have initialized family,
 *       ifindex (or ifname) and flags. ifname should be set *only* when
 *       the caller wants to rename the link.
 * @complete_from_cache: in/out; if TRUE on input, the link flags are
 *       completed from the link currently in the platform cache. Set to
 *       FALSE (with -NLE_INVAL returned) when the cached link is missing.
 *
 * Returns a negative libnl error code; -NLE_SUCCESS (0) on success.
 * -NLE_EXIST is treated as success (races with identical external requests).
 * Note: the ifindex precondition previously returned FALSE, which equals
 * -NLE_SUCCESS and would falsely report success; it now returns -NLE_INVAL. */
static int
kernel_change_link (NMPlatform *platform, struct rtnl_link *nlo, gboolean *complete_from_cache)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	struct nl_msg *msg;
	int nle;
	const int nlflags = 0;
	int ifindex;

	ifindex = rtnl_link_get_ifindex (nlo);

	g_return_val_if_fail (ifindex > 0, -NLE_INVAL);

	/* Previously, we were using rtnl_link_change(), which builds a request based
	 * on the diff with an original link instance.
	 *
	 * The diff only reused ifi_family, ifi_index, ifi_flags, and name from
	 * the original link (see rtnl_link_build_change_request()).
	 *
	 * We don't do that anymore as we don't have an "orig" netlink instance that
	 * we can use. Instead the caller must ensure to properly initialize @nlo,
	 * especially it must set family, ifindex (or ifname) and flags.
	 * ifname should be set *only* if the caller wishes to change the name.
	 *
	 * @complete_from_cache is a convenience to copy the link flags over the link inside
	 * the platform cache. */

	if (*complete_from_cache) {
		const NMPObject *obj_cache;

		obj_cache = nmp_cache_lookup_link (priv->cache, ifindex);
		if (!obj_cache || !obj_cache->_link.netlink.is_in_netlink) {
			_LOGT ("kernel-change-link: failure changing link %d: cannot complete link", ifindex);
			*complete_from_cache = FALSE;
			return -NLE_INVAL;
		}

		rtnl_link_set_flags (nlo, obj_cache->link.flags);

		/* If the caller wants to rename the link, he should explicitly set
		 * rtnl_link_set_name(). In all other cases, it should leave the name
		 * unset. Unfortunately, there is not public API in libnl to modify the
		 * attribute mask and clear (link->ce_mask = ~LINK_ATTR_IFNAME), so we
		 * require the caller to do the right thing -- i.e. don't set the name.
		 */
	}

	/* We don't use rtnl_link_change() because we have no original rtnl_link object
	 * at hand. We also don't use rtnl_link_add() because that doesn't have the
	 * hack to retry with RTM_SETLINK. Reimplement a mix of both. */
	nle = rtnl_link_build_add_request (nlo, nlflags, &msg);
	if (nle < 0) {
		_LOGT ("kernel-change-link: failure changing link %d: cannot construct message (%s, %d)",
		       ifindex, nl_geterror (nle), -nle);
		return nle;
	}

retry:
	nle = nl_send_auto_complete (priv->nlh, msg);
	if (nle < 0)
		goto errout;
	nle = nl_wait_for_ack (priv->nlh);
	if (nle == -NLE_OPNOTSUPP && nlmsg_hdr (msg)->nlmsg_type == RTM_NEWLINK) {
		/* Some kernels don't accept RTM_NEWLINK for changes; retry once
		 * with the older RTM_SETLINK message type. */
		nlmsg_hdr (msg)->nlmsg_type = RTM_SETLINK;
		goto retry;
	}

errout:
	nlmsg_free (msg);

	/* NLE_EXIST is considered equivalent to success to avoid race conditions. You
	 * never know when something sends an identical object just before
	 * NetworkManager.
	 *
	 * When netlink returns NLE_OBJ_NOTFOUND, it usually means it failed to find
	 * firmware for the device, especially on nm_platform_link_set_up().
	 * This is basically the same check as in the original code and could
	 * potentially be improved.
	 */
	switch (nle) {
	case -NLE_SUCCESS:
		_LOGT ("kernel-change-link: success changing link %d", ifindex);
		break;
	case -NLE_EXIST:
		_LOGT ("kernel-change-link: success changing link %d: %s (%d)",
		       ifindex, nl_geterror (nle), -nle);
		break;
	case -NLE_OBJ_NOTFOUND:
		_LOGT ("kernel-change-link: failure changing link %d: firmware not found (%s, %d)",
		       ifindex, nl_geterror (nle), -nle);
		break;
	default:
		_LOGT ("kernel-change-link: failure changing link %d: netlink error (%s, %d)",
		       ifindex, nl_geterror (nle), -nle);
		break;
	}

	return nle;
}
2013-03-27 22:23:24 +01:00
/* nl_msg_parse() callback: take a reference on the parsed libnl object and
 * hand it back to the caller through @data (a struct nl_object **). */
static void
ref_object (struct nl_object *obj, void *data)
{
	struct nl_object **result = data;

	nl_object_get (obj);
	*result = obj;
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
/* NL_CB_SEQ_CHECK callback for the event socket: track netlink sequence
 * numbers so that explicit dump requests can be matched against the
 * asynchronous event stream.
 *
 * Sequence number 0 marks unsolicited multicast notifications and is
 * ignored. Otherwise the last seen sequence is recorded and, if it matches
 * the one we are waiting for (priv->nlh_seq_expect), the wait is cleared.
 *
 * Always returns NL_OK so that no message is rejected. */
static int
event_seq_check (struct nl_msg *msg, gpointer user_data)
{
	NMPlatform *platform = NM_PLATFORM (user_data);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	struct nlmsghdr *hdr;

	hdr = nlmsg_hdr (msg);

	if (hdr->nlmsg_seq == 0)
		return NL_OK;

	priv->nlh_seq_last = hdr->nlmsg_seq;

	if (priv->nlh_seq_expect == 0)
		_LOGT ("event_seq_check(): seq %u received (not waited)", hdr->nlmsg_seq);
	else if (hdr->nlmsg_seq == priv->nlh_seq_expect) {
		_LOGT ("event_seq_check(): seq %u received", hdr->nlmsg_seq);

		priv->nlh_seq_expect = 0;
	} else {
		/* Bug fix: log the sequence we are still waiting for
		 * (nlh_seq_expect), not nlh_seq_last which was just set to the
		 * received sequence and would print the same number twice. */
		_LOGT ("event_seq_check(): seq %u received (wait for %u)", hdr->nlmsg_seq, priv->nlh_seq_expect);
	}

	return NL_OK;
}
static int
2015-06-25 17:49:09 +02:00
event_err ( struct sockaddr_nl * nla , struct nlmsgerr * nlerr , gpointer platform )
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
{
2015-06-25 17:49:09 +02:00
_LOGT ( " event_err(): error from kernel: %s (%d) for request %d " ,
strerror ( nlerr ? - nlerr - > error : 0 ) ,
nlerr ? - nlerr - > error : 0 ,
NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > nlh_seq_last ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
return NL_OK ;
}
2013-03-27 22:23:24 +01:00
/* This function does all the magic to avoid race conditions caused
* by concurrent usage of synchronous commands and an asynchronous cache . This
* might be a nice future addition to libnl but it requires to do all operations
* through the cache manager . In this case , nm - linux - platform serves as the
* cache manager instead of the one provided by libnl .
*/
static int
event_notification ( struct nl_msg * msg , gpointer user_data )
{
NMPlatform * platform = NM_PLATFORM ( user_data ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( user_data ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
auto_nl_object struct nl_object * nlo = NULL ;
auto_nmp_obj NMPObject * obj = NULL ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
struct nlmsghdr * msghdr ;
2015-05-29 11:12:15 +02:00
char buf_nlmsg_type [ 16 ] ;
2013-03-27 22:23:24 +01:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
msghdr = nlmsg_hdr ( msg ) ;
2014-01-07 17:21:12 +01:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( _support_kernel_extended_ifa_flags_still_undecided ( ) & & msghdr - > nlmsg_type = = RTM_NEWADDR )
2015-05-06 11:55:02 +02:00
_support_kernel_extended_ifa_flags_detect ( msg ) ;
2014-01-07 17:21:12 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
nl_msg_parse ( msg , ref_object , & nlo ) ;
if ( ! nlo )
2013-03-27 22:23:24 +01:00
return NL_OK ;
platform: avoid storing unknown netlink object types (bgo #742928)
Testing WWAN connections through a Nokia Series 40 phone, addresses of family
AF_PHONET end up triggering an assert() in object_has_ifindex(), just because
object_type_from_nl_object() only handles AF_INET and AF_INET6 address.
In order to avoid this kind of problems, we'll try to make sure that the object
caches kept by NM only store known object types.
(fixup by dcbw to use cached passed to cache_remove_unknown())
https://bugzilla.gnome.org/show_bug.cgi?id=742928
Connect: ppp0 <--> /dev/ttyACM0
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 5 / phase 'establish'
NetworkManager[27434]: <info> (ppp0): new Generic device (driver: 'unknown' ifindex: 12)
NetworkManager[27434]: <info> (ppp0): exported as /org/freedesktop/NetworkManager/Devices/4
[Thread 0x7ffff1ecf700 (LWP 27439) exited]
NetworkManager[27434]: <info> (ttyACM0): device state change: ip-config -> deactivating (reason 'user-requested') [70 110 39]
Terminating on signal 15
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 10 / phase 'terminate'
**
NetworkManager:ERROR:platform/nm-linux-platform.c:1534:object_has_ifindex: code should not be reached
Program received signal SIGABRT, Aborted.
0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
(gdb) bt
#0 0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
#1 0x00007ffff4693e6a in abort () from /usr/lib/libc.so.6
#2 0x00007ffff4c8d7f5 in g_assertion_message () from /usr/lib/libglib-2.0.so.0
#3 0x00007ffff4c8d88a in g_assertion_message_expr () from /usr/lib/libglib-2.0.so.0
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
#5 0x0000000000472bec in check_cache_items (platform=0x7fe8a0, cache=0x7fda30, ifindex=12) at platform/nm-linux-platform.c:1549
#6 0x0000000000472de3 in announce_object (platform=0x7fe8a0, object=0x8a8c30, change_type=NM_PLATFORM_SIGNAL_REMOVED, reason=NM_PLATFORM_REASON_EXTERNAL) at platform/nm-linux-platform.c:1617
#7 0x0000000000473dd2 in event_notification (msg=0x8a7970, user_data=0x7fe8a0) at platform/nm-linux-platform.c:1992
#8 0x00007ffff5ee14de in nl_recvmsgs_report () from /usr/lib/libnl-3.so.200
#9 0x00007ffff5ee1849 in nl_recvmsgs () from /usr/lib/libnl-3.so.200
#10 0x00000000004794df in event_handler (channel=0x7fc930, io_condition=G_IO_IN, user_data=0x7fe8a0) at platform/nm-linux-platform.c:4152
#11 0x00007ffff4c6791d in g_main_context_dispatch () from /usr/lib/libglib-2.0.so.0
#12 0x00007ffff4c67cf8 in ?? () from /usr/lib/libglib-2.0.so.0
#13 0x00007ffff4c68022 in g_main_loop_run () from /usr/lib/libglib-2.0.so.0
#14 0x00000000004477ee in main (argc=1, argv=0x7fffffffeaa8) at main.c:447
(gdb) fr 4
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
1534 g_assert_not_reached ();
2015-01-15 09:18:07 +01:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( _support_user_ipv6ll_still_undecided ( ) & & msghdr - > nlmsg_type = = RTM_NEWLINK )
_support_user_ipv6ll_detect ( ( struct rtnl_link * ) nlo ) ;
2015-08-05 18:20:00 +02:00
switch ( msghdr - > nlmsg_type ) {
case RTM_DELADDR :
case RTM_DELLINK :
case RTM_DELROUTE :
/* The event notifies about a deleted object. We don't need to initialize all the
* fields of the nmp - object . Shortcut nmp_object_from_nl ( ) . */
obj = nmp_object_from_nl ( platform , nlo , TRUE , TRUE ) ;
_LOGD ( " event-notification: %s, seq %u: %s " ,
_nl_nlmsg_type_to_str ( msghdr - > nlmsg_type , buf_nlmsg_type , sizeof ( buf_nlmsg_type ) ) ,
msghdr - > nlmsg_seq , nmp_object_to_string ( obj , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
break ;
default :
obj = nmp_object_from_nl ( platform , nlo , FALSE , TRUE ) ;
_LOGD ( " event-notification: %s, seq %u: %s " ,
_nl_nlmsg_type_to_str ( msghdr - > nlmsg_type , buf_nlmsg_type , sizeof ( buf_nlmsg_type ) ) ,
msghdr - > nlmsg_seq , nmp_object_to_string ( obj , NMP_OBJECT_TO_STRING_PUBLIC , NULL , 0 ) ) ;
break ;
}
platform: avoid storing unknown netlink object types (bgo #742928)
Testing WWAN connections through a Nokia Series 40 phone, addresses of family
AF_PHONET end up triggering an assert() in object_has_ifindex(), just because
object_type_from_nl_object() only handles AF_INET and AF_INET6 address.
In order to avoid this kind of problems, we'll try to make sure that the object
caches kept by NM only store known object types.
(fixup by dcbw to use cached passed to cache_remove_unknown())
https://bugzilla.gnome.org/show_bug.cgi?id=742928
Connect: ppp0 <--> /dev/ttyACM0
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 5 / phase 'establish'
NetworkManager[27434]: <info> (ppp0): new Generic device (driver: 'unknown' ifindex: 12)
NetworkManager[27434]: <info> (ppp0): exported as /org/freedesktop/NetworkManager/Devices/4
[Thread 0x7ffff1ecf700 (LWP 27439) exited]
NetworkManager[27434]: <info> (ttyACM0): device state change: ip-config -> deactivating (reason 'user-requested') [70 110 39]
Terminating on signal 15
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 10 / phase 'terminate'
**
NetworkManager:ERROR:platform/nm-linux-platform.c:1534:object_has_ifindex: code should not be reached
Program received signal SIGABRT, Aborted.
0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
(gdb) bt
#0 0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
#1 0x00007ffff4693e6a in abort () from /usr/lib/libc.so.6
#2 0x00007ffff4c8d7f5 in g_assertion_message () from /usr/lib/libglib-2.0.so.0
#3 0x00007ffff4c8d88a in g_assertion_message_expr () from /usr/lib/libglib-2.0.so.0
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
#5 0x0000000000472bec in check_cache_items (platform=0x7fe8a0, cache=0x7fda30, ifindex=12) at platform/nm-linux-platform.c:1549
#6 0x0000000000472de3 in announce_object (platform=0x7fe8a0, object=0x8a8c30, change_type=NM_PLATFORM_SIGNAL_REMOVED, reason=NM_PLATFORM_REASON_EXTERNAL) at platform/nm-linux-platform.c:1617
#7 0x0000000000473dd2 in event_notification (msg=0x8a7970, user_data=0x7fe8a0) at platform/nm-linux-platform.c:1992
#8 0x00007ffff5ee14de in nl_recvmsgs_report () from /usr/lib/libnl-3.so.200
#9 0x00007ffff5ee1849 in nl_recvmsgs () from /usr/lib/libnl-3.so.200
#10 0x00000000004794df in event_handler (channel=0x7fc930, io_condition=G_IO_IN, user_data=0x7fe8a0) at platform/nm-linux-platform.c:4152
#11 0x00007ffff4c6791d in g_main_context_dispatch () from /usr/lib/libglib-2.0.so.0
#12 0x00007ffff4c67cf8 in ?? () from /usr/lib/libglib-2.0.so.0
#13 0x00007ffff4c68022 in g_main_loop_run () from /usr/lib/libglib-2.0.so.0
#14 0x00000000004477ee in main (argc=1, argv=0x7fffffffeaa8) at main.c:447
(gdb) fr 4
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
1534 g_assert_not_reached ();
2015-01-15 09:18:07 +01:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
if ( obj ) {
auto_nmp_obj NMPObject * obj_cache = NULL ;
switch ( msghdr - > nlmsg_type ) {
case RTM_NEWLINK :
2015-06-19 16:24:18 +02:00
if ( NMP_OBJECT_GET_TYPE ( obj ) = = NMP_OBJECT_TYPE_LINK
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
& & g_hash_table_lookup ( priv - > delayed_deletion , obj ) ! = NULL ) {
/* the object is scheduled for delayed deletion. Replace that object
* by clearing the value from priv - > delayed_deletion . */
_LOGT ( " delayed-deletion: clear delayed deletion of protected object %s " , nmp_object_to_string ( obj , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
g_hash_table_insert ( priv - > delayed_deletion , nmp_object_ref ( obj ) , NULL ) ;
}
/* fall-through */
case RTM_NEWADDR :
case RTM_NEWROUTE :
cache_update_netlink ( platform , obj , & obj_cache , NULL , NM_PLATFORM_REASON_EXTERNAL ) ;
break ;
case RTM_DELLINK :
2015-06-19 16:24:18 +02:00
if ( NMP_OBJECT_GET_TYPE ( obj ) = = NMP_OBJECT_TYPE_LINK
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
& & g_hash_table_contains ( priv - > delayed_deletion , obj ) ) {
/* We sometimes receive spurious RTM_DELLINK events. In this case, we want to delay
* the deletion of the object until later . */
_LOGT ( " delayed-deletion: delay deletion of protected object %s " , nmp_object_to_string ( obj , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
g_hash_table_insert ( priv - > delayed_deletion , nmp_object_ref ( obj ) , nmp_object_ref ( obj ) ) ;
break ;
}
/* fall-through */
case RTM_DELADDR :
case RTM_DELROUTE :
cache_remove_netlink ( platform , obj , & obj_cache , NULL , NM_PLATFORM_REASON_EXTERNAL ) ;
break ;
default :
break ;
}
2014-09-29 17:58:44 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
cache_prune_candidates_drop ( platform , obj_cache ) ;
}
2013-03-27 22:23:24 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return NL_OK ;
2013-03-27 22:23:24 +01:00
}
/******************************************************************/
2014-03-04 20:15:28 +01:00
static void
2015-08-30 15:51:20 +02:00
_log_dbg_sysctl_set_impl ( NMPlatform * platform , const char * path , const char * value )
2014-03-04 20:15:28 +01:00
{
GError * error = NULL ;
char * contents , * contents_escaped ;
char * value_escaped = g_strescape ( value , NULL ) ;
if ( ! g_file_get_contents ( path , & contents , NULL , & error ) ) {
2015-08-30 15:51:20 +02:00
_LOGD ( " sysctl: setting '%s' to '%s' (current value cannot be read: %s) " , path , value_escaped , error - > message ) ;
2014-03-04 20:15:28 +01:00
g_clear_error ( & error ) ;
} else {
g_strstrip ( contents ) ;
contents_escaped = g_strescape ( contents , NULL ) ;
if ( strcmp ( contents , value ) = = 0 )
2015-08-30 15:51:20 +02:00
_LOGD ( " sysctl: setting '%s' to '%s' (current value is identical) " , path , value_escaped ) ;
2014-03-04 20:15:28 +01:00
else
2015-08-30 15:51:20 +02:00
_LOGD ( " sysctl: setting '%s' to '%s' (current value is '%s') " , path , value_escaped , contents_escaped ) ;
2014-03-04 20:15:28 +01:00
g_free ( contents ) ;
g_free ( contents_escaped ) ;
}
g_free ( value_escaped ) ;
}
2015-08-30 15:51:20 +02:00
/* Log the pending sysctl write, but only pay the cost of reading the
 * current value when debug logging is actually enabled. */
#define _log_dbg_sysctl_set(platform, path, value) \
	G_STMT_START { \
		if (_LOGD_ENABLED ()) { \
			_log_dbg_sysctl_set_impl (platform, path, value); \
		} \
	} G_STMT_END
2013-04-03 16:10:38 +02:00
/* Write @value to the sysctl/sysfs file at @path.
 *
 * Returns: TRUE if the entire value (plus trailing LF) was written,
 * FALSE on open failure or if the write did not complete after
 * three attempts. */
static gboolean
sysctl_set (NMPlatform *platform, const char *path, const char *value)
{
	int fd, len, nwrote, tries;
	char *actual;

	g_return_val_if_fail (path != NULL, FALSE);
	g_return_val_if_fail (value != NULL, FALSE);

	/* Don't write outside known locations */
	g_assert (g_str_has_prefix (path, "/proc/sys/")
	          || g_str_has_prefix (path, "/sys/"));
	/* Don't write to suspicious locations */
	g_assert (!strstr (path, "/../"));

	fd = open (path, O_WRONLY | O_TRUNC);
	if (fd == -1) {
		/* A missing sysctl path is common (e.g. optional features), so
		 * it is only worth a debug message; anything else is an error. */
		if (errno == ENOENT) {
			_LOGD ("sysctl: failed to open '%s': (%d) %s",
			       path, errno, strerror (errno));
		} else {
			_LOGE ("sysctl: failed to open '%s': (%d) %s",
			       path, errno, strerror (errno));
		}
		return FALSE;
	}

	_log_dbg_sysctl_set (platform, path, value);

	/* Most sysfs and sysctl options don't care about a trailing LF, while some
	 * (like infiniband) do.  So always add the LF.  Also, neither sysfs nor
	 * sysctl support partial writes so the LF must be added to the string we're
	 * about to write.
	 */
	actual = g_strdup_printf ("%s\n", value);

	/* Try to write the entire value three times if a partial write occurs */
	len = strlen (actual);
	for (tries = 0, nwrote = 0; tries < 3 && nwrote != len; tries++) {
		nwrote = write (fd, actual, len);
		if (nwrote == -1) {
			if (errno == EINTR) {
				_LOGD ("sysctl: interrupted, will try again");
				continue;
			}
			break;
		}
	}
	if (nwrote == -1 && errno != EEXIST) {
		_LOGE ("sysctl: failed to set '%s' to '%s': (%d) %s",
		       path, value, errno, strerror (errno));
	} else if (nwrote < len) {
		_LOGE ("sysctl: failed to set '%s' to '%s' after three attempts",
		       path, value);
	}

	g_free (actual);
	close (fd);

	return (nwrote == len);
}
2014-03-04 20:15:28 +01:00
static void
2015-08-30 15:51:20 +02:00
_log_dbg_sysctl_get_impl ( NMPlatform * platform , const char * path , const char * contents )
2014-03-04 20:15:28 +01:00
{
2015-08-30 15:51:20 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2014-03-04 20:15:28 +01:00
const char * prev_value = NULL ;
2015-08-30 15:51:20 +02:00
if ( ! priv - > sysctl_get_prev_values )
priv - > sysctl_get_prev_values = g_hash_table_new_full ( g_str_hash , g_str_equal , g_free , g_free ) ;
2014-03-04 20:15:28 +01:00
else
2015-08-30 15:51:20 +02:00
prev_value = g_hash_table_lookup ( priv - > sysctl_get_prev_values , path ) ;
2014-03-04 20:15:28 +01:00
if ( prev_value ) {
if ( strcmp ( prev_value , contents ) ! = 0 ) {
char * contents_escaped = g_strescape ( contents , NULL ) ;
char * prev_value_escaped = g_strescape ( prev_value , NULL ) ;
2015-08-30 15:51:20 +02:00
_LOGD ( " sysctl: reading '%s': '%s' (changed from '%s' on last read) " , path , contents_escaped , prev_value_escaped ) ;
2014-03-04 20:15:28 +01:00
g_free ( contents_escaped ) ;
g_free ( prev_value_escaped ) ;
2015-08-30 15:51:20 +02:00
g_hash_table_insert ( priv - > sysctl_get_prev_values , g_strdup ( path ) , g_strdup ( contents ) ) ;
2014-03-04 20:15:28 +01:00
}
} else {
char * contents_escaped = g_strescape ( contents , NULL ) ;
2015-08-30 15:51:20 +02:00
_LOGD ( " sysctl: reading '%s': '%s' " , path , contents_escaped ) ;
2014-03-04 20:15:28 +01:00
g_free ( contents_escaped ) ;
2015-08-30 15:51:20 +02:00
g_hash_table_insert ( priv - > sysctl_get_prev_values , g_strdup ( path ) , g_strdup ( contents ) ) ;
2014-03-04 20:15:28 +01:00
}
}
2015-08-30 15:51:20 +02:00
/* Log a sysctl read when debug logging is enabled. When it is disabled,
 * drop the previous-values cache so that stale entries don't linger and
 * so logging a "changed from" line is accurate once debugging is
 * re-enabled. */
#define _log_dbg_sysctl_get(platform, path, contents) \
	G_STMT_START { \
		if (_LOGD_ENABLED ()) { \
			_log_dbg_sysctl_get_impl (platform, path, contents); \
		} else { \
			NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform); \
			\
			if (priv->sysctl_get_prev_values) { \
				g_hash_table_destroy (priv->sysctl_get_prev_values); \
				priv->sysctl_get_prev_values = NULL; \
			} \
		} \
	} G_STMT_END
2013-04-03 16:10:38 +02:00
static char *
2014-02-25 13:23:07 -05:00
sysctl_get ( NMPlatform * platform , const char * path )
2013-04-03 16:10:38 +02:00
{
GError * error = NULL ;
char * contents ;
2014-03-04 21:14:30 +01:00
/* Don't write outside known locations */
g_assert ( g_str_has_prefix ( path , " /proc/sys/ " )
| | g_str_has_prefix ( path , " /sys/ " ) ) ;
/* Don't write to suspicious locations */
2014-03-24 12:34:43 +01:00
g_assert ( ! strstr ( path , " /../ " ) ) ;
2014-03-04 21:14:30 +01:00
2013-04-03 16:10:38 +02:00
if ( ! g_file_get_contents ( path , & contents , NULL , & error ) ) {
2014-02-25 13:23:07 -05:00
/* We assume FAILED means EOPNOTSUP */
if ( g_error_matches ( error , G_FILE_ERROR , G_FILE_ERROR_NOENT )
| | g_error_matches ( error , G_FILE_ERROR , G_FILE_ERROR_FAILED ) )
2015-08-30 15:51:20 +02:00
_LOGD ( " error reading %s: %s " , path , error - > message ) ;
2014-02-25 13:23:07 -05:00
else
2015-08-30 15:51:20 +02:00
_LOGE ( " error reading %s: %s " , path , error - > message ) ;
2013-09-23 18:15:21 +02:00
g_clear_error ( & error ) ;
2013-04-03 16:10:38 +02:00
return NULL ;
}
2014-03-04 20:15:28 +01:00
g_strstrip ( contents ) ;
2015-08-30 15:51:20 +02:00
_log_dbg_sysctl_get ( platform , path , contents ) ;
2014-03-04 20:15:28 +01:00
return contents ;
2013-04-03 16:10:38 +02:00
}
/******************************************************************/
2015-04-06 18:29:36 +02:00
/* Look up the link with @ifindex in the platform cache.
 *
 * Returns: the cached #NMPObject, or %NULL if the link is not in the
 * cache or not visible (per nmp_object_is_visible()). The returned
 * object is owned by the cache; do not unref. */
static const NMPObject *
cache_lookup_link (NMPlatform *platform, int ifindex)
{
	const NMPObject *obj_cache;

	obj_cache = nmp_cache_lookup_link (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, ifindex);
	if (!nmp_object_is_visible (obj_cache))
		return NULL;

	return obj_cache;
}
2013-03-27 22:23:24 +01:00
/* Return all visible links from the platform cache as a newly allocated
 * #GArray (caller frees). */
static GArray *
link_get_all (NMPlatform *platform)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);

	return nmp_cache_lookup_multi_to_array (priv->cache,
	                                        NMP_OBJECT_TYPE_LINK,
	                                        nmp_cache_id_init_object_type (NMP_CACHE_ID_STATIC, NMP_OBJECT_TYPE_LINK, TRUE));
}
2015-06-20 12:05:01 +02:00
/* Fetch the cached link with @ifindex.
 *
 * Returns: a pointer to the link data inside the cached object, or
 * %NULL if no visible link with that ifindex exists. The pointer is
 * owned by the cache. */
static const NMPlatformLink *
_nm_platform_link_get (NMPlatform *platform, int ifindex)
{
	const NMPObject *obj;

	obj = cache_lookup_link (platform, ifindex);
	return obj ? &obj->link : NULL;
}
/* Fetch the cached link named @ifname.
 *
 * Returns: a pointer to the link data inside the cached object, or
 * %NULL if @ifname is %NULL/empty or no matching visible link exists.
 * The pointer is owned by the cache. */
static const NMPlatformLink *
_nm_platform_link_get_by_ifname (NMPlatform *platform,
                                 const char *ifname)
{
	const NMPObject *obj = NULL;

	if (ifname && *ifname) {
		obj = nmp_cache_lookup_link_full (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache,
		                                  0, ifname, TRUE, NM_LINK_TYPE_NONE, NULL, NULL);
	}
	return obj ? &obj->link : NULL;
}
2014-04-22 16:02:15 +02:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
struct _nm_platform_link_get_by_address_data {
gconstpointer address ;
guint8 length ;
} ;
static gboolean
_nm_platform_link_get_by_address_match_link ( const NMPObject * obj , struct _nm_platform_link_get_by_address_data * d )
{
return obj - > link . addr . len = = d - > length & & ! memcmp ( obj - > link . addr . data , d - > address , d - > length ) ;
2014-04-22 16:02:15 +02:00
}
2015-06-20 12:05:01 +02:00
static const NMPlatformLink *
2014-09-18 12:16:11 -05:00
_nm_platform_link_get_by_address ( NMPlatform * platform ,
gconstpointer address ,
2015-06-20 12:05:01 +02:00
size_t length )
2014-09-18 12:16:11 -05:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj ;
struct _nm_platform_link_get_by_address_data d = {
. address = address ,
. length = length ,
} ;
2014-09-18 12:16:11 -05:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( length < = 0 | | length > NM_UTILS_HWADDR_LEN_MAX )
2015-06-20 12:05:01 +02:00
return NULL ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( ! address )
2015-06-20 12:05:01 +02:00
return NULL ;
2014-09-18 12:16:11 -05:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
obj = nmp_cache_lookup_link_full ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache ,
0 , NULL , TRUE , NM_LINK_TYPE_NONE ,
( NMPObjectMatchFn ) _nm_platform_link_get_by_address_match_link , & d ) ;
2015-06-20 12:05:01 +02:00
return obj ? & obj - > link : NULL ;
2014-09-18 12:16:11 -05:00
}
2013-03-27 22:23:24 +01:00
/* Allocate a libnl rtnl_link object for @ifindex/@name. When @type is
 * set, also tag the object with the corresponding rtnl type string
 * (asserting that libnl accepts it). */
static struct nl_object *
build_rtnl_link (int ifindex, const char *name, NMLinkType type)
{
	struct rtnl_link *rtnllink = _nl_rtnl_link_alloc (ifindex, name);

	if (type) {
		int nle = rtnl_link_set_type (rtnllink, nm_link_type_to_rtnl_type_string (type));

		g_assert (!nle);
	}
	return (struct nl_object *) rtnllink;
}
2015-04-14 23:14:06 +02:00
/* Virtual-table command: convert a cached NMPlatformLink into a newly
 * allocated libnl rtnl_link object. An empty name is passed on as NULL.
 * @id_only is ignored for links. */
struct nl_object *
_nmp_vt_cmd_plobj_to_nl_link (NMPlatform *platform, const NMPlatformObject *_obj, gboolean id_only)
{
	const NMPlatformLink *obj = (const NMPlatformLink *) _obj;
	const char *name = obj->name[0] ? obj->name : NULL;

	return build_rtnl_link (obj->ifindex, name, obj->type);
}
2015-04-06 18:29:36 +02:00
static gboolean
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
do_add_link ( NMPlatform * platform , const char * name , const struct rtnl_link * nlo )
2015-04-06 18:29:36 +02:00
{
NMPObject obj_needle ;
int nle ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
event_handler_read_netlink_all ( platform , FALSE ) ;
2015-06-19 16:24:18 +02:00
nle = kernel_add_object ( platform , NMP_OBJECT_TYPE_LINK , ( const struct nl_object * ) nlo ) ;
2015-04-06 18:29:36 +02:00
if ( nle < 0 ) {
_LOGE ( " do-add-link: failure adding link '%s': %s " , name , nl_geterror ( nle ) ) ;
return FALSE ;
}
_LOGD ( " do-add-link: success adding link '%s' " , name ) ;
nmp_object_stackinit_id_link ( & obj_needle , 0 ) ;
g_strlcpy ( obj_needle . link . name , name , sizeof ( obj_needle . link . name ) ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_all ( platform , TRUE ) ;
/* FIXME: we add the link object via the second netlink socket. Sometimes,
* the notification is not yet ready via nlh_event , so we have to re - request the
* link so that it is in the cache . A better solution would be to do everything
* via one netlink socket . */
if ( ! nmp_cache_lookup_link_full ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache , 0 , obj_needle . link . name , FALSE , NM_LINK_TYPE_NONE , NULL , NULL ) ) {
_LOGT ( " do-add-link: reload: the added link is not yet ready. Request %s " , obj_needle . link . name ) ;
do_request_link ( platform , 0 , obj_needle . link . name , TRUE ) ;
}
2015-04-06 18:29:36 +02:00
/* Return true, because kernel_add_object() succeeded. This doesn't indicate that the
* object is now actuall in the cache , because there could be a race .
*
* For that , you ' d have to look at @ out_obj . */
return TRUE ;
}
/* Like do_add_link(), but afterwards look up the resulting link in the
 * cache by @name and @expected_link_type, optionally copying it to
 * @out_link. Returns TRUE iff the link was found in the cache. */
static gboolean
do_add_link_with_lookup (NMPlatform *platform, const char *name, const struct rtnl_link *nlo, NMLinkType expected_link_type, NMPlatformLink *out_link)
{
	const NMPObject *obj;

	do_add_link (platform, name, nlo);

	obj = nmp_cache_lookup_link_full (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache,
	                                  0, name, FALSE, expected_link_type, NULL, NULL);
	if (obj && out_link)
		*out_link = obj->link;
	return !!obj;
}
/* Add an IPv4/IPv6 address or route object to the kernel via the
 * synchronous netlink socket, then try to sync the platform cache
 * (draining events and, if needed, re-dumping that object type).
 *
 * Returns TRUE iff kernel_add_object() succeeded; this does NOT say
 * whether the object has already reached the cache. */
static gboolean
do_add_addrroute (NMPlatform *platform, const NMPObject *obj_id, const struct nl_object *nlo)
{
	const char *obj_type_name = NMP_OBJECT_GET_CLASS (obj_id)->obj_type_name;
	int nle;

	nm_assert (NM_IN_SET (NMP_OBJECT_GET_TYPE (obj_id),
	                      NMP_OBJECT_TYPE_IP4_ADDRESS, NMP_OBJECT_TYPE_IP6_ADDRESS,
	                      NMP_OBJECT_TYPE_IP4_ROUTE, NMP_OBJECT_TYPE_IP6_ROUTE));

	/* Process pending events first so the cache is current before we
	 * modify kernel state. */
	event_handler_read_netlink_all (platform, FALSE);

	nle = kernel_add_object (platform, NMP_OBJECT_GET_CLASS (obj_id)->obj_type, (const struct nl_object *) nlo);
	if (nle < 0) {
		_LOGW ("do-add-%s: failure adding %s '%s': %s (%d)",
		       obj_type_name,
		       obj_type_name,
		       nmp_object_to_string (obj_id, NMP_OBJECT_TO_STRING_ID, NULL, 0),
		       nl_geterror (nle), -nle);
		return FALSE;
	}
	_LOGD ("do-add-%s: success adding object %s", obj_type_name, nmp_object_to_string (obj_id, NMP_OBJECT_TO_STRING_ID, NULL, 0));

	delayed_action_handle_all (platform, TRUE);

	/* FIXME: instead of re-requesting the added object, add it via nlh_event
	 * so that the events are in sync. */
	if (!nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, obj_id)) {
		_LOGT ("do-add-%s: reload: the added object is not yet ready. Request %s", obj_type_name, nmp_object_to_string (obj_id, NMP_OBJECT_TO_STRING_ID, NULL, 0));
		do_request_one_type (platform, NMP_OBJECT_GET_TYPE (obj_id), TRUE);
	}

	/* The return value doesn't say whether the object is in the platform cache
	 * after adding it.
	 * Instead the return value says whether kernel_add_object() succeeded. */
	return TRUE;
}
static gboolean
do_delete_object ( NMPlatform * platform , const NMPObject * obj_id , const struct nl_object * nlo )
{
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-04-06 18:29:36 +02:00
auto_nl_object struct nl_object * nlo_free = NULL ;
int nle ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
event_handler_read_netlink_all ( platform , FALSE ) ;
2015-04-06 18:29:36 +02:00
if ( ! nlo )
nlo = nlo_free = nmp_object_to_nl ( platform , obj_id , FALSE ) ;
nle = kernel_delete_object ( platform , NMP_OBJECT_GET_TYPE ( obj_id ) , nlo ) ;
if ( nle < 0 )
_LOGE ( " do-delete-%s: failure deleting '%s': %s (%d) " , NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name , nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) , nl_geterror ( nle ) , - nle ) ;
else
_LOGD ( " do-delete-%s: success deleting '%s' " , NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name , nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_all ( platform , TRUE ) ;
/* FIXME: instead of re-requesting the deleted object, add it via nlh_event
* so that the events are in sync . */
2015-06-19 16:24:18 +02:00
if ( NMP_OBJECT_GET_TYPE ( obj_id ) = = NMP_OBJECT_TYPE_LINK ) {
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
const NMPObject * obj ;
obj = nmp_cache_lookup_link_full ( priv - > cache , obj_id - > link . ifindex , obj_id - > link . ifindex < = 0 & & obj_id - > link . name [ 0 ] ? obj_id - > link . name : NULL , FALSE , NM_LINK_TYPE_NONE , NULL , NULL ) ;
if ( obj & & obj - > _link . netlink . is_in_netlink ) {
_LOGT ( " do-delete-%s: reload: the deleted object is not yet removed. Request %s " , NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name , nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
do_request_link ( platform , obj_id - > link . ifindex , obj_id - > link . name , TRUE ) ;
}
} else {
if ( nmp_cache_lookup_obj ( priv - > cache , obj_id ) ) {
_LOGT ( " do-delete-%s: reload: the deleted object is not yet removed. Request %s " , NMP_OBJECT_GET_CLASS ( obj_id ) - > obj_type_name , nmp_object_to_string ( obj_id , NMP_OBJECT_TO_STRING_ID , NULL , 0 ) ) ;
do_request_one_type ( platform , NMP_OBJECT_GET_TYPE ( obj_id ) , TRUE ) ;
}
}
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
/* The return value doesn't say, whether the object is in the platform cache after adding
* it .
* Instead the return value says , whether kernel_add_object ( ) succeeded . */
2015-04-06 18:29:36 +02:00
return nle > = 0 ;
}
2015-06-15 17:41:27 +02:00
static NMPlatformError
2015-04-06 18:29:36 +02:00
do_change_link ( NMPlatform * platform , struct rtnl_link * nlo , gboolean complete_from_cache )
{
int nle ;
int ifindex ;
gboolean complete_from_cache2 = complete_from_cache ;
ifindex = rtnl_link_get_ifindex ( nlo ) ;
if ( ifindex < = 0 )
2015-06-15 17:41:27 +02:00
g_return_val_if_reached ( NM_PLATFORM_ERROR_BUG ) ;
2015-04-06 18:29:36 +02:00
nle = kernel_change_link ( platform , nlo , & complete_from_cache2 ) ;
switch ( nle ) {
case - NLE_SUCCESS :
_LOGD ( " do-change-link: success changing link %d " , ifindex ) ;
break ;
case - NLE_EXIST :
_LOGD ( " do-change-link: success changing link %d: %s (%d) " , ifindex , nl_geterror ( nle ) , - nle ) ;
break ;
case - NLE_OBJ_NOTFOUND :
/* fall-through */
default :
platform: drop nm_platform_get_error()
For NMPlatform instances we had an error reporting mechanism
which stores the last error reason in a private field. Later we
would check it via nm_platform_get_error().
Remove this. It was not used much, and it is not a great way
to report errors.
One problem is that at the point where the error happens, you don't
know whether anybody cares about an error code. So, you add code to set
the error reason because somebody *might* need it (but in realitiy, almost
no caller cares).
Also, we tested this functionality which is hardly used in non-testing code.
While this was a burden to maintain in the tests, it was likely still buggy
because there were no real use-cases, beside the tests.
Then, sometimes platform functions call each other which might overwrite the
error reason. So, every function must be cautious to preserve/set
the error reason according to it's own meaning. This can involve storing
the error code, calling another function, and restoring it afterwards.
This is harder to get right compared to a "return-error-code" pattern, where
every function manages its error code independently.
It is better to return the error reason whenever due. For that we already
have our common glib patterns
(1) gboolean fcn (...);
(2) gboolean fcn (..., GError **error);
In few cases, we need more details then a #gboolean, but don't want
to bother constructing a #GError. Then we should do instead:
(3) NMPlatformError fcn (...);
2015-06-15 17:58:36 +02:00
if ( complete_from_cache ! = complete_from_cache2 )
2015-04-06 18:29:36 +02:00
_LOGD ( " do-change-link: failure changing link %d: link does not exist in cache " , ifindex ) ;
platform: drop nm_platform_get_error()
For NMPlatform instances we had an error reporting mechanism
which stores the last error reason in a private field. Later we
would check it via nm_platform_get_error().
Remove this. It was not used much, and it is not a great way
to report errors.
One problem is that at the point where the error happens, you don't
know whether anybody cares about an error code. So, you add code to set
the error reason because somebody *might* need it (but in realitiy, almost
no caller cares).
Also, we tested this functionality which is hardly used in non-testing code.
While this was a burden to maintain in the tests, it was likely still buggy
because there were no real use-cases, beside the tests.
Then, sometimes platform functions call each other which might overwrite the
error reason. So, every function must be cautious to preserve/set
the error reason according to it's own meaning. This can involve storing
the error code, calling another function, and restoring it afterwards.
This is harder to get right compared to a "return-error-code" pattern, where
every function manages its error code independently.
It is better to return the error reason whenever due. For that we already
have our common glib patterns
(1) gboolean fcn (...);
(2) gboolean fcn (..., GError **error);
In few cases, we need more details then a #gboolean, but don't want
to bother constructing a #GError. Then we should do instead:
(3) NMPlatformError fcn (...);
2015-06-15 17:58:36 +02:00
else
2015-04-06 18:29:36 +02:00
_LOGE ( " do-change-link: failure changing link %d: %s (%d) " , ifindex , nl_geterror ( nle ) , - nle ) ;
2015-06-15 17:41:27 +02:00
return nle = = - NLE_OBJ_NOTFOUND ? NM_PLATFORM_ERROR_NO_FIRMWARE : NM_PLATFORM_ERROR_UNSPECIFIED ;
2015-04-06 18:29:36 +02:00
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
/* FIXME: as we modify the link via a separate socket, the cache is not in
* sync and we have to refetch the link . */
do_request_link ( platform , ifindex , NULL , TRUE ) ;
2015-06-15 17:41:27 +02:00
return NM_PLATFORM_ERROR_SUCCESS ;
2015-04-06 18:29:36 +02:00
}
2014-09-18 12:53:19 -05:00
/* link_add:
 * @platform: the platform instance
 * @name: name for the new link
 * @type: the #NMLinkType of the link to create
 * @address: (allow-none): hardware address to assign, or %NULL
 * @address_len: length of @address in bytes; must be 0 iff @address is %NULL
 * @out_link: (out) (allow-none): on success, the resulting platform link
 *
 * Create a new kernel link of the given type via netlink.
 *
 * Returns: %TRUE on success.
 */
static gboolean
link_add (NMPlatform *platform,
          const char *name,
          NMLinkType type,
          const void *address,
          size_t address_len,
          NMPlatformLink *out_link)
{
	auto_nl_object struct nl_object *l = NULL;

	if (type == NM_LINK_TYPE_BOND) {
		/* When the kernel loads the bond module, either via explicit modprobe
		 * or automatically in response to creating a bond master, it will also
		 * create a 'bond0' interface. Since the bond we're about to create may
		 * or may not be named 'bond0' prevent potential confusion about a bond
		 * that the user didn't want by telling the bonding module not to create
		 * bond0 automatically.
		 */
		if (!g_file_test ("/sys/class/net/bonding_masters", G_FILE_TEST_EXISTS))
			nm_utils_modprobe (NULL, TRUE, "bonding", "max_bonds=0", NULL);
	}

	debug ("link: add link '%s' of type '%s' (%d)",
	       name, nm_link_type_to_string (type), (int) type);

	l = build_rtnl_link (0, name, type);

	/* Either both an address and a non-zero length are given, or neither. */
	g_assert ((address != NULL) ^ (address_len == 0));
	if (address) {
		/* AF_LLC marks this as a link-layer (hardware) address for libnl. */
		auto_nl_addr struct nl_addr *nladdr = _nl_addr_build (AF_LLC, address, address_len);

		rtnl_link_set_addr ((struct rtnl_link *) l, nladdr);
	}

	return do_add_link_with_lookup (platform, name, (struct rtnl_link *) l, type, out_link);
}
/* link_delete:
 * @platform: the platform instance
 * @ifindex: interface index of the link to delete
 *
 * Delete the kernel link identified by @ifindex. The link must currently
 * be present in the platform cache (and known to netlink), otherwise the
 * function fails without issuing a request.
 *
 * Returns: %TRUE if the delete request succeeded.
 */
static gboolean
link_delete (NMPlatform *platform, int ifindex)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	NMPObject obj_needle;
	const NMPObject *obj;

	obj = nmp_cache_lookup_link (priv->cache, ifindex);
	/* Only delete links we actually see in netlink; a cache entry that is
	 * udev-only (not in netlink) cannot be deleted via RTM_DELLINK. */
	if (!obj || !obj->_link.netlink.is_in_netlink)
		return FALSE;

	/* Build a stack-allocated ID-only object to identify the link. */
	nmp_object_stackinit_id_link (&obj_needle, ifindex);
	return do_delete_object (platform, &obj_needle, NULL);
}
2013-04-26 11:43:08 -04:00
static const char *
link_get_type_name ( NMPlatform * platform , int ifindex )
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj = cache_lookup_link ( platform , ifindex ) ;
2015-04-28 10:11:04 +02:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( ! obj )
2015-04-28 10:11:04 +02:00
return NULL ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, it cannot do better than O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many shortcomings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it loses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting to make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( obj - > link . type ! = NM_LINK_TYPE_UNKNOWN ) {
2015-04-28 10:11:04 +02:00
/* We could detect the @link_type. In this case the function returns
* our internal module names, which differs from rtnl_link_get_type():
* - NM_LINK_TYPE_INFINIBAND ( gives " infiniband " , instead of " ipoib " )
* - NM_LINK_TYPE_TAP ( gives " tap " , instead of " tun " ) .
* Note that this functions is only used by NMDeviceGeneric to
* set type_description . */
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return nm_link_type_to_string ( obj - > link . type ) ;
2015-04-28 10:11:04 +02:00
}
/* Link type not detected. Fallback to rtnl_link_get_type()/IFLA_INFO_KIND. */
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return str_if_set ( obj - > link . kind , " unknown " ) ;
2013-03-27 22:23:24 +01:00
}
2015-01-22 16:41:15 +01:00
static gboolean
2015-06-24 14:21:27 +02:00
link_get_unmanaged ( NMPlatform * platform , int ifindex , gboolean * unmanaged )
2015-01-22 16:41:15 +01:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * link ;
GUdevDevice * udev_device = NULL ;
link = nmp_cache_lookup_link ( priv - > cache , ifindex ) ;
if ( link )
udev_device = link - > _link . udev . device ;
2015-01-22 16:41:15 +01:00
2015-03-27 13:58:52 -05:00
if ( udev_device & & g_udev_device_get_property ( udev_device , " NM_UNMANAGED " ) ) {
2015-06-24 14:21:27 +02:00
* unmanaged = g_udev_device_get_property_as_boolean ( udev_device , " NM_UNMANAGED " ) ;
2015-01-22 16:41:15 +01:00
return TRUE ;
}
return FALSE ;
}
2014-02-11 13:58:00 +01:00
/* Synchronously re-request the link with @ifindex from the kernel and
 * return whether it is (still) present in the platform cache afterwards. */
static gboolean
link_refresh (NMPlatform *platform, int ifindex)
{
	do_request_link (platform, ifindex, NULL, TRUE);

	return cache_lookup_link (platform, ifindex) != NULL;
}
2015-06-15 17:41:27 +02:00
static NMPlatformError
2013-03-27 22:23:24 +01:00
link_change_flags ( NMPlatform * platform , int ifindex , unsigned int flags , gboolean value )
{
2015-05-29 09:38:26 +02:00
auto_nl_object struct rtnl_link * change = _nl_rtnl_link_alloc ( ifindex , NULL ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj_cache ;
char buf [ 256 ] ;
obj_cache = cache_lookup_link ( platform , ifindex ) ;
if ( ! obj_cache )
2015-06-15 17:41:27 +02:00
return NM_PLATFORM_ERROR_NOT_FOUND ;
2013-03-27 22:23:24 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
rtnl_link_set_flags ( change , obj_cache - > link . flags ) ;
2013-03-27 22:23:24 +01:00
if ( value )
rtnl_link_set_flags ( change , flags ) ;
else
rtnl_link_unset_flags ( change , flags ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
_LOGD ( " link: change %d: flags %s '%s' (%d) " , ifindex ,
value ? " set " : " unset " ,
rtnl_link_flags2str ( flags , buf , sizeof ( buf ) ) ,
flags ) ;
2014-03-05 10:56:16 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return do_change_link ( platform , change , FALSE ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2015-06-15 17:41:27 +02:00
link_set_up ( NMPlatform * platform , int ifindex , gboolean * out_no_firmware )
2013-03-27 22:23:24 +01:00
{
2015-06-15 17:41:27 +02:00
NMPlatformError plerr ;
plerr = link_change_flags ( platform , ifindex , IFF_UP , TRUE ) ;
if ( out_no_firmware )
* out_no_firmware = plerr = = NM_PLATFORM_ERROR_NO_FIRMWARE ;
return plerr = = NM_PLATFORM_ERROR_SUCCESS ;
2013-03-27 22:23:24 +01:00
}
/* Take the link with @ifindex down (clear IFF_UP). Returns TRUE on success. */
static gboolean
link_set_down (NMPlatform *platform, int ifindex)
{
	NMPlatformError plerr = link_change_flags (platform, ifindex, IFF_UP, FALSE);

	return plerr == NM_PLATFORM_ERROR_SUCCESS;
}
/* Enable ARP on the link with @ifindex (clear IFF_NOARP). Returns TRUE on success. */
static gboolean
link_set_arp (NMPlatform *platform, int ifindex)
{
	NMPlatformError plerr = link_change_flags (platform, ifindex, IFF_NOARP, FALSE);

	return plerr == NM_PLATFORM_ERROR_SUCCESS;
}
/* Disable ARP on the link with @ifindex (set IFF_NOARP). Returns TRUE on success. */
static gboolean
link_set_noarp (NMPlatform *platform, int ifindex)
{
	NMPlatformError plerr = link_change_flags (platform, ifindex, IFF_NOARP, TRUE);

	return plerr == NM_PLATFORM_ERROR_SUCCESS;
}
2015-06-15 14:41:35 +02:00
/* Return the sysfs path of the link with @ifindex, or NULL if the link
 * is not cached, not visible in netlink, or has no udev device. */
static const char *
link_get_udi (NMPlatform *platform, int ifindex)
{
	const NMPObject *obj = cache_lookup_link (platform, ifindex);

	if (!obj)
		return NULL;
	if (!obj->_link.netlink.is_in_netlink)
		return NULL;
	if (!obj->_link.udev.device)
		return NULL;

	return g_udev_device_get_sysfs_path (obj->_link.udev.device);
}
2015-06-15 15:19:28 +02:00
/* Return the cached GUdevDevice for @ifindex (as a GObject), or NULL.
 *
 * We don't use cache_lookup_link() because it would return NULL if the
 * link is not visible in libnl. For link_get_udev_device() we want to
 * return whatever we have, even if the link itself appears invisible
 * via other platform functions. */
static GObject *
link_get_udev_device (NMPlatform *platform, int ifindex)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	const NMPObject *obj_cache = nmp_cache_lookup_link (priv->cache, ifindex);

	if (!obj_cache)
		return NULL;

	return (GObject *) obj_cache->_link.udev.device;
}
2014-07-24 15:57:08 -05:00
/* Set the kernel's IPv6 address generation mode for @ifindex.
 *
 * @enabled TRUE means userspace manages the IPv6LL address (mode NONE);
 * FALSE lets the kernel generate an EUI-64 based address.
 * Returns FALSE when libnl or the kernel lacks addrgenmode support. */
static gboolean
link_set_user_ipv6ll_enabled (NMPlatform *platform, int ifindex, gboolean enabled)
{
#if HAVE_LIBNL_INET6_ADDR_GEN_MODE
	if (_support_user_ipv6ll_get ()) {
		auto_nl_object struct rtnl_link *nlo = _nl_rtnl_link_alloc (ifindex, NULL);
		guint8 mode;
		char buf[32];

		mode = enabled ? IN6_ADDR_GEN_MODE_NONE : IN6_ADDR_GEN_MODE_EUI64;
		rtnl_link_inet6_set_addr_gen_mode (nlo, mode);

		debug ("link: change %d: set IPv6 address generation mode to %s",
		       ifindex, rtnl_link_inet6_addrgenmode2str (mode, buf, sizeof (buf)));

		return do_change_link (platform, nlo, TRUE) == NM_PLATFORM_ERROR_SUCCESS;
	}
#endif
	return FALSE;
}
2013-05-20 15:38:54 -03:00
/* Return whether the link with @ifindex supports carrier detection. */
static gboolean
link_supports_carrier_detect (NMPlatform *platform, int ifindex)
{
	const char *name = nm_platform_link_get_name (platform, ifindex);

	if (!name)
		return FALSE;

	/* We use netlink for the actual carrier detection, but netlink can't tell
	 * us whether the device actually supports carrier detection in the first
	 * place. We assume any device that does implements one of these two APIs. */
	if (nmp_utils_ethtool_supports_carrier_detect (name))
		return TRUE;
	return nmp_utils_mii_supports_carrier_detect (name);
}
static gboolean
link_supports_vlans (NMPlatform *platform, int ifindex)
{
	const NMPObject *obj;

	obj = cache_lookup_link (platform, ifindex);

	/* Only ARPHRD_ETHER links can possibly support VLANs. */
	if (!obj || obj->link.arptype != ARPHRD_ETHER)
		return FALSE;

	/* Delegate the actual capability query to ethtool. */
	return nmp_utils_ethtool_supports_vlans (obj->link.name);
}
2013-03-27 22:53:55 +01:00
/* Set the hardware (link-layer) address of the interface @ifindex.
 *
 * @address/@length: the raw hardware address bytes to set.
 * Returns TRUE if the netlink change request succeeded.
 */
static gboolean
link_set_address (NMPlatform *platform, int ifindex, gconstpointer address, size_t length)
{
	auto_nl_object struct rtnl_link *change = _nl_rtnl_link_alloc (ifindex, NULL);
	/* AF_LLC marks this as a raw link-layer address for libnl. */
	auto_nl_addr struct nl_addr *nladdr = _nl_addr_build (AF_LLC, address, length);
	gs_free char *mac = NULL;

	rtnl_link_set_addr (change, nladdr);

	_LOGD ("link: change %d: address %s (%lu bytes)", ifindex,
	       (mac = nm_utils_hwaddr_ntoa (address, length)),
	       (unsigned long) length);
	return do_change_link (platform, change, TRUE) == NM_PLATFORM_ERROR_SUCCESS;
}
2014-10-03 17:37:26 -05:00
/* Fetch the permanent (burned-in) hardware address via ethtool.
 * Returns TRUE on success; @buf/@length receive the address. */
static gboolean
link_get_permanent_address (NMPlatform *platform,
                            int ifindex,
                            guint8 *buf,
                            size_t *length)
{
	return nmp_utils_ethtool_get_permanent_address (nm_platform_link_get_name (platform, ifindex), buf, length);
}
2013-04-15 21:48:12 +02:00
/* Set the MTU of interface @ifindex via a netlink link-change request.
 * Returns TRUE if the kernel accepted the change. */
static gboolean
link_set_mtu (NMPlatform *platform, int ifindex, guint32 mtu)
{
	auto_nl_object struct rtnl_link *change = _nl_rtnl_link_alloc (ifindex, NULL);

	rtnl_link_set_mtu (change, mtu);

	debug ("link: change %d: mtu %lu", ifindex, (unsigned long) mtu);

	return do_change_link (platform, change, TRUE) == NM_PLATFORM_ERROR_SUCCESS;
}
2013-10-11 14:59:26 -04:00
/* Read the physical port id of @ifindex from sysfs
 * (/sys/class/net/<ifname>/phys_port_id).
 *
 * Returns a newly allocated string (caller frees), or NULL if the
 * interface name cannot be resolved.
 */
static char *
link_get_physical_port_id (NMPlatform *platform, int ifindex)
{
	const char *ifname;
	char *path, *id;

	ifname = nm_platform_link_get_name (platform, ifindex);
	if (!ifname)
		return NULL;

	/* Guard against an interface name that is not a safe path component. */
	ifname = ASSERT_VALID_PATH_COMPONENT (ifname);

	path = g_strdup_printf ("/sys/class/net/%s/phys_port_id", ifname);
	id = sysctl_get (platform, path);
	g_free (path);

	return id;
}
2015-03-24 12:35:36 -05:00
/* Read the device id of @ifindex from sysfs
 * (/sys/class/net/<ifname>/dev_id). The value is reported by the
 * kernel as hexadecimal.
 *
 * Returns the parsed id, or 0 if the interface name cannot be
 * resolved, the sysfs entry is empty, or parsing fails.
 */
static guint
link_get_dev_id (NMPlatform *platform, int ifindex)
{
	const char *ifname;
	gs_free char *path = NULL, *id = NULL;
	gint64 int_val;

	ifname = nm_platform_link_get_name (platform, ifindex);
	if (!ifname)
		return 0;

	ifname = ASSERT_VALID_PATH_COMPONENT (ifname);

	path = g_strdup_printf ("/sys/class/net/%s/dev_id", ifname);
	id = sysctl_get (platform, path);
	if (!id || !*id)
		return 0;

	/* Value is reported as hex; _nm_utils_ascii_str_to_int64() signals
	 * a parse failure via errno. */
	int_val = _nm_utils_ascii_str_to_int64 (id, 16, 0, G_MAXUINT16, 0);
	return errno ? 0 : (int) int_val;
}
2013-03-27 22:53:55 +01:00
static int
2014-09-18 12:53:19 -05:00
vlan_add ( NMPlatform * platform ,
const char * name ,
int parent ,
int vlan_id ,
guint32 vlan_flags ,
NMPlatformLink * out_link )
2013-03-27 22:53:55 +01:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
auto_nl_object struct rtnl_link * rtnllink = ( struct rtnl_link * ) build_rtnl_link ( 0 , name , NM_LINK_TYPE_VLAN ) ;
2013-03-27 22:53:55 +01:00
unsigned int kernel_flags ;
kernel_flags = 0 ;
if ( vlan_flags & NM_VLAN_FLAG_REORDER_HEADERS )
kernel_flags | = VLAN_FLAG_REORDER_HDR ;
if ( vlan_flags & NM_VLAN_FLAG_GVRP )
kernel_flags | = VLAN_FLAG_GVRP ;
if ( vlan_flags & NM_VLAN_FLAG_LOOSE_BINDING )
kernel_flags | = VLAN_FLAG_LOOSE_BINDING ;
rtnl_link_set_link ( rtnllink , parent ) ;
rtnl_link_vlan_set_id ( rtnllink , vlan_id ) ;
2013-10-20 18:12:45 +02:00
rtnl_link_vlan_set_flags ( rtnllink , kernel_flags ) ;
2013-03-27 22:53:55 +01:00
2014-03-05 10:56:16 +01:00
debug ( " link: add vlan '%s', parent %d, vlan id %d, flags %X (native: %X) " ,
name , parent , vlan_id , ( unsigned int ) vlan_flags , kernel_flags ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return do_add_link_with_lookup ( platform , name , rtnllink , NM_LINK_TYPE_VLAN , out_link ) ;
2013-03-27 22:53:55 +01:00
}
static gboolean
vlan_get_info ( NMPlatform * platform , int ifindex , int * parent , int * vlan_id )
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj = cache_lookup_link ( platform , ifindex ) ;
int p = 0 , v = 0 ;
2013-03-27 22:53:55 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( obj ) {
p = obj - > link . parent ;
v = obj - > link . vlan_id ;
}
2013-03-27 22:53:55 +01:00
if ( parent )
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better than O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which have many shortcomings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it loses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
* parent = p ;
2013-03-27 22:53:55 +01:00
if ( vlan_id )
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
* vlan_id = v ;
return ! ! obj ;
2013-03-27 22:53:55 +01:00
}
/* vlan_set_ingress_map:
 *
 * Set one VLAN ingress priority mapping entry (@from -> @to) on the
 * VLAN link @ifindex by sending a link-change request to the kernel.
 * Returns TRUE on success. */
static gboolean
vlan_set_ingress_map (NMPlatform *platform, int ifindex, int from, int to)
{
	auto_nl_object struct rtnl_link *req = (struct rtnl_link *) build_rtnl_link (ifindex, NULL, NM_LINK_TYPE_VLAN);

	debug ("link: change %d: vlan ingress map %d -> %d", ifindex, from, to);

	/* The request must carry the "vlan" link type for the kernel to
	 * accept VLAN-specific attributes such as the ingress map. */
	rtnl_link_set_type (req, "vlan");
	rtnl_link_vlan_set_ingress_map (req, from, to);

	return do_change_link (platform, req, TRUE) == NM_PLATFORM_ERROR_SUCCESS;
}
/* vlan_set_egress_map:
 *
 * Set one VLAN egress priority mapping entry (@from -> @to) on the
 * VLAN link @ifindex by sending a link-change request to the kernel.
 * Returns TRUE on success. */
static gboolean
vlan_set_egress_map (NMPlatform *platform, int ifindex, int from, int to)
{
	auto_nl_object struct rtnl_link *req = (struct rtnl_link *) build_rtnl_link (ifindex, NULL, NM_LINK_TYPE_VLAN);

	debug ("link: change %d: vlan egress map %d -> %d", ifindex, from, to);

	/* The request must carry the "vlan" link type for the kernel to
	 * accept VLAN-specific attributes such as the egress map. */
	rtnl_link_set_type (req, "vlan");
	rtnl_link_vlan_set_egress_map (req, from, to);

	return do_change_link (platform, req, TRUE) == NM_PLATFORM_ERROR_SUCCESS;
}
2013-03-27 22:53:55 +01:00
static gboolean
2013-08-02 00:43:12 +02:00
link_enslave ( NMPlatform * platform , int master , int slave )
2013-03-27 22:53:55 +01:00
{
2015-05-29 09:38:26 +02:00
auto_nl_object struct rtnl_link * change = _nl_rtnl_link_alloc ( slave , NULL ) ;
2013-03-27 22:53:55 +01:00
2013-08-02 00:43:12 +02:00
rtnl_link_set_master ( change , master ) ;
2014-03-05 10:56:16 +01:00
debug ( " link: change %d: enslave to master %d " , slave , master ) ;
2015-06-15 17:41:27 +02:00
return do_change_link ( platform , change , TRUE ) = = NM_PLATFORM_ERROR_SUCCESS ;
2013-03-27 22:53:55 +01:00
}
/* link_release:
 *
 * Detach @slave from its master. Implemented by "enslaving" it to
 * ifindex 0; @master is accepted for interface symmetry but unused. */
static gboolean
link_release (NMPlatform *platform, int master, int slave)
{
	return link_enslave (platform, 0, slave);
}
2013-04-04 17:07:47 +02:00
static char *
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
link_option_path ( NMPlatform * platform , int master , const char * category , const char * option )
2013-04-04 17:07:47 +02:00
{
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
const char * name = nm_platform_link_get_name ( platform , master ) ;
2013-04-04 17:07:47 +02:00
if ( ! name | | ! category | | ! option )
return NULL ;
2014-03-12 12:49:34 +01:00
return g_strdup_printf ( " /sys/class/net/%s/%s/%s " ,
ASSERT_VALID_PATH_COMPONENT ( name ) ,
ASSERT_VALID_PATH_COMPONENT ( category ) ,
ASSERT_VALID_PATH_COMPONENT ( option ) ) ;
2013-04-04 17:07:47 +02:00
}
static gboolean
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
link_set_option ( NMPlatform * platform , int master , const char * category , const char * option , const char * value )
2013-04-04 17:07:47 +02:00
{
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
gs_free char * path = link_option_path ( platform , master , category , option ) ;
2013-04-04 17:07:47 +02:00
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
return path & & nm_platform_sysctl_set ( platform , path , value ) ;
2013-04-04 17:07:47 +02:00
}
static char *
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
link_get_option ( NMPlatform * platform , int master , const char * category , const char * option )
2013-04-04 17:07:47 +02:00
{
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
gs_free char * path = link_option_path ( platform , master , category , option ) ;
2013-04-04 17:07:47 +02:00
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
return path ? nm_platform_sysctl_get ( platform , path ) : NULL ;
2013-04-04 17:07:47 +02:00
}
/* master_category:
 *
 * Map the link type of @master to the sysfs option category used for
 * master-device options ("bridge" or "bonding"), or NULL when the link
 * type carries no such options.
 */
static const char *
master_category (NMPlatform *platform, int master)
{
	switch (nm_platform_link_get_type (platform, master)) {
	case NM_LINK_TYPE_BRIDGE:
		return "bridge";
	case NM_LINK_TYPE_BOND:
		return "bonding";
	default:
		/* not a master type we manage options for */
		return NULL;
	}
}
/* slave_category:
 *
 * Determine the sysfs option category for a slave device by looking at
 * the link type of its master. Returns "brport" for bridge slaves,
 * NULL when @slave has no master or the master type has no per-slave
 * options.
 */
static const char *
slave_category (NMPlatform *platform, int slave)
{
	int master = nm_platform_link_get_master (platform, slave);

	/* no master (or invalid ifindex): nothing to categorize */
	if (master <= 0)
		return NULL;

	switch (nm_platform_link_get_type (platform, master)) {
	case NM_LINK_TYPE_BRIDGE:
		return "brport";
	default:
		return NULL;
	}
}
/* master_set_option:
 *
 * Set a sysfs option on a master device; thin wrapper that resolves the
 * option category from the master's link type.
 */
static gboolean
master_set_option (NMPlatform *platform, int master, const char *option, const char *value)
{
	const char *category = master_category (platform, master);

	return link_set_option (platform, master, category, option, value);
}
/* master_get_option:
 *
 * Read a sysfs option from a master device; thin wrapper that resolves
 * the option category from the master's link type. Returns a newly
 * allocated string (caller frees), or NULL.
 */
static char *
master_get_option (NMPlatform *platform, int master, const char *option)
{
	const char *category = master_category (platform, master);

	return link_get_option (platform, master, category, option);
}
/* slave_set_option:
 *
 * Set a sysfs option on a slave device; thin wrapper that resolves the
 * option category from the slave's master link type.
 */
static gboolean
slave_set_option (NMPlatform *platform, int slave, const char *option, const char *value)
{
	const char *category = slave_category (platform, slave);

	return link_set_option (platform, slave, category, option, value);
}
/* slave_get_option:
 *
 * Read a sysfs option from a slave device; thin wrapper that resolves
 * the option category from the slave's master link type. Returns a
 * newly allocated string (caller frees), or NULL.
 */
static char *
slave_get_option (NMPlatform *platform, int slave, const char *option)
{
	const char *category = slave_category (platform, slave);

	return link_get_option (platform, slave, category, option);
}
2013-06-10 16:21:08 -03:00
static gboolean
2014-09-18 12:53:19 -05:00
infiniband_partition_add ( NMPlatform * platform , int parent , int p_key , NMPlatformLink * out_link )
2013-06-10 16:21:08 -03:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
const NMPObject * obj_parent ;
const NMPObject * obj ;
gs_free char * path = NULL ;
gs_free char * id = NULL ;
gs_free char * ifname = NULL ;
2013-06-10 16:21:08 -03:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
obj_parent = nmp_cache_lookup_link ( priv - > cache , parent ) ;
if ( ! obj_parent | | ! obj_parent - > link . name [ 0 ] )
g_return_val_if_reached ( FALSE ) ;
2013-06-10 16:21:08 -03:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
ifname = g_strdup_printf ( " %s.%04x " , obj_parent - > link . name , p_key ) ;
path = g_strdup_printf ( " /sys/class/net/%s/create_child " , ASSERT_VALID_PATH_COMPONENT ( obj_parent - > link . name ) ) ;
2013-06-10 16:21:08 -03:00
id = g_strdup_printf ( " 0x%04x " , p_key ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( ! nm_platform_sysctl_set ( platform , path , id ) )
return FALSE ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
do_request_link ( platform , 0 , ifname , TRUE ) ;
2013-06-10 16:21:08 -03:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
obj = nmp_cache_lookup_link_full ( NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) - > cache ,
0 , ifname , FALSE , NM_LINK_TYPE_INFINIBAND , NULL , NULL ) ;
if ( out_link & & obj )
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
* out_link = obj - > link ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
return ! ! obj ;
2013-06-10 16:21:08 -03:00
}
2014-10-06 09:37:34 -05:00
/* Parsed IPoIB link attributes: partition key and transport mode name. */
typedef struct {
	int p_key;
	const char *mode;
} IpoibInfo;

/* IFLA_IPOIB_* were introduced in the 3.7 kernel, but the kernel headers
 * we're building against might not have those properties even though the
 * running kernel might.
 */
#define IFLA_IPOIB_UNSPEC 0
#define IFLA_IPOIB_PKEY   1
#define IFLA_IPOIB_MODE   2
#define IFLA_IPOIB_UMCAST 3
#undef IFLA_IPOIB_MAX
#define IFLA_IPOIB_MAX IFLA_IPOIB_UMCAST

#define IPOIB_MODE_DATAGRAM  0 /* using unreliable datagram QPs */
#define IPOIB_MODE_CONNECTED 1 /* using connected QPs */

/* Validation policy for the IFLA_IPOIB_* nested attributes: all u16. */
static const struct nla_policy infiniband_info_policy[IFLA_IPOIB_MAX + 1] = {
	[IFLA_IPOIB_PKEY]   = { .type = NLA_U16 },
	[IFLA_IPOIB_MODE]   = { .type = NLA_U16 },
	[IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
};
/* infiniband_info_data_parser:
 *
 * Parse the IFLA_INFO_DATA nested attributes of an IPoIB link into the
 * #IpoibInfo pointed to by @parser_data.
 *
 * Returns: 0 on success, a negative libnl error from nla_parse_nested(),
 *   -EINVAL if PKEY or MODE is missing, or -NLE_PARSE_ERR on an unknown
 *   mode value.
 */
static int
infiniband_info_data_parser (struct nlattr *info_data, gpointer parser_data)
{
	IpoibInfo *info = parser_data;
	/* FIX: the table was previously sized IFLA_MACVLAN_MAX + 1 (copy-paste
	 * from the macvlan parser); nla_parse_nested() fills indices up to
	 * IFLA_IPOIB_MAX, so size it accordingly to avoid a potential
	 * out-of-bounds write. */
	struct nlattr *tb[IFLA_IPOIB_MAX + 1];
	int err;

	err = nla_parse_nested (tb, IFLA_IPOIB_MAX, info_data,
	                        (struct nla_policy *) infiniband_info_policy);
	if (err < 0)
		return err;
	if (!tb[IFLA_IPOIB_PKEY] || !tb[IFLA_IPOIB_MODE])
		return -EINVAL;

	info->p_key = nla_get_u16 (tb[IFLA_IPOIB_PKEY]);

	switch (nla_get_u16 (tb[IFLA_IPOIB_MODE])) {
	case IPOIB_MODE_DATAGRAM:
		info->mode = "datagram";
		break;
	case IPOIB_MODE_CONNECTED:
		info->mode = "connected";
		break;
	default:
		return -NLE_PARSE_ERR;
	}
	return 0;
}
static gboolean
infiniband_get_info ( NMPlatform * platform , int ifindex , int * parent , int * p_key , const char * * mode )
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which have many shortcomings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj ;
2014-10-06 09:37:34 -05:00
IpoibInfo info = { - 1 , NULL } ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
obj = cache_lookup_link ( platform , ifindex ) ;
if ( ! obj )
2014-10-06 09:37:34 -05:00
return FALSE ;
if ( parent )
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
* parent = obj - > link . parent ;
2014-10-06 09:37:34 -05:00
2015-05-29 09:38:26 +02:00
if ( _nl_link_parse_info_data ( priv - > nlh ,
ifindex ,
infiniband_info_data_parser ,
& info ) ! = 0 ) {
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const char * iface = obj - > link . name ;
2014-10-06 09:37:34 -05:00
char * path , * contents = NULL ;
/* Fall back to reading sysfs */
path = g_strdup_printf ( " /sys/class/net/%s/mode " , ASSERT_VALID_PATH_COMPONENT ( iface ) ) ;
contents = nm_platform_sysctl_get ( platform , path ) ;
g_free ( path ) ;
if ( ! contents )
return FALSE ;
if ( strstr ( contents , " datagram " ) )
info . mode = " datagram " ;
else if ( strstr ( contents , " connected " ) )
info . mode = " connected " ;
g_free ( contents ) ;
path = g_strdup_printf ( " /sys/class/net/%s/pkey " , ASSERT_VALID_PATH_COMPONENT ( iface ) ) ;
contents = nm_platform_sysctl_get ( platform , path ) ;
g_free ( path ) ;
if ( ! contents )
return FALSE ;
info . p_key = ( int ) _nm_utils_ascii_str_to_int64 ( contents , 16 , 0 , 0xFFFF , - 1 ) ;
g_free ( contents ) ;
if ( info . p_key < 0 )
return FALSE ;
}
if ( p_key )
* p_key = info . p_key ;
if ( mode )
* mode = info . mode ;
return TRUE ;
}
2013-05-03 13:55:51 -04:00
static gboolean
veth_get_properties (NMPlatform *platform, int ifindex, NMPlatformVethProperties *props)
{
	/* Look up the peer ifindex of a veth device via ethtool and fill @props.
	 * Returns FALSE if the link name is unknown or no valid peer exists. */
	const char *ifname;
	int peer_ifindex;

	ifname = nm_platform_link_get_name (platform, ifindex);
	if (!ifname)
		return FALSE;

	peer_ifindex = nmp_utils_ethtool_get_peer_ifindex (ifname);
	if (peer_ifindex <= 0)
		return FALSE;

	props->peer = peer_ifindex;
	return TRUE;
}
2013-04-25 15:46:39 -04:00
static gboolean
2015-04-23 23:16:00 +02:00
tun_get_properties_ifname ( NMPlatform * platform , const char * ifname , NMPlatformTunProperties * props )
2013-04-25 15:46:39 -04:00
{
char * path , * val ;
2014-01-21 11:04:26 +01:00
gboolean success = TRUE ;
g_return_val_if_fail ( props , FALSE ) ;
memset ( props , 0 , sizeof ( * props ) ) ;
props - > owner = - 1 ;
props - > group = - 1 ;
2013-04-25 15:46:39 -04:00
2014-01-21 11:04:26 +01:00
if ( ! ifname | | ! nm_utils_iface_valid_name ( ifname ) )
2013-04-25 15:46:39 -04:00
return FALSE ;
2014-03-12 12:49:34 +01:00
ifname = ASSERT_VALID_PATH_COMPONENT ( ifname ) ;
2013-04-25 15:46:39 -04:00
path = g_strdup_printf ( " /sys/class/net/%s/owner " , ifname ) ;
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
val = nm_platform_sysctl_get ( platform , path ) ;
2013-04-25 15:46:39 -04:00
g_free ( path ) ;
2014-01-21 11:04:26 +01:00
if ( val ) {
2015-02-22 11:55:31 +01:00
props - > owner = _nm_utils_ascii_str_to_int64 ( val , 10 , - 1 , G_MAXINT64 , - 1 ) ;
2014-01-21 11:04:26 +01:00
if ( errno )
success = FALSE ;
g_free ( val ) ;
} else
success = FALSE ;
2013-04-25 15:46:39 -04:00
path = g_strdup_printf ( " /sys/class/net/%s/group " , ifname ) ;
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
val = nm_platform_sysctl_get ( platform , path ) ;
2013-04-25 15:46:39 -04:00
g_free ( path ) ;
2014-01-21 11:04:26 +01:00
if ( val ) {
2015-02-22 11:55:31 +01:00
props - > group = _nm_utils_ascii_str_to_int64 ( val , 10 , - 1 , G_MAXINT64 , - 1 ) ;
2014-01-21 11:04:26 +01:00
if ( errno )
success = FALSE ;
g_free ( val ) ;
} else
success = FALSE ;
2013-04-25 15:46:39 -04:00
path = g_strdup_printf ( " /sys/class/net/%s/tun_flags " , ifname ) ;
platform: add self argument to platform functions
Most nm_platform_*() functions operate on the platform
singleton nm_platform_get(). That made sense because the
NMPlatform instance was mainly to hook fake platform for
testing.
While the implicit argument saved some typing, I think explicit is
better. Especially, because NMPlatform could become a more usable
object then just a hook for testing.
With this change, NMPlatform instances can be used individually, not
only as a singleton instance.
Before this change, the constructor of NMLinuxPlatform could not
call any nm_platform_*() functions because the singleton was not
yet initialized. We could only instantiate an incomplete instance,
register it via nm_platform_setup(), and then complete initialization
via singleton->setup().
With this change, we can create and fully initialize NMPlatform instances
before/without setting them up them as singleton.
Also, currently there is no clear distinction between functions
that operate on the NMPlatform instance, and functions that can
be used stand-alone (e.g. nm_platform_ip4_address_to_string()).
The latter can not be mocked for testing. With this change, the
distinction becomes obvious. That is also useful because it becomes
clearer which functions make use of the platform cache and which not.
Inside nm-linux-platform.c, continue the pattern that the
self instance is named @platform. That makes sense because
its type is NMPlatform, and not NMLinuxPlatform what we
would expect from a paramter named @self.
This is a major diff that causes some pain when rebasing. Try
to rebase to the parent commit of this commit as a first step.
Then rebase on top of this commit using merge-strategy "ours".
2015-04-18 12:36:09 +02:00
val = nm_platform_sysctl_get ( platform , path ) ;
2013-04-25 15:46:39 -04:00
g_free ( path ) ;
2014-01-21 11:04:26 +01:00
if ( val ) {
gint64 flags ;
2015-02-22 11:55:31 +01:00
flags = _nm_utils_ascii_str_to_int64 ( val , 16 , 0 , G_MAXINT64 , 0 ) ;
2014-01-21 11:04:26 +01:00
if ( ! errno ) {
# ifndef IFF_MULTI_QUEUE
const int IFF_MULTI_QUEUE = 0x0100 ;
2013-06-04 12:01:38 -03:00
# endif
2014-11-19 18:38:13 -06:00
props - > mode = ( ( flags & ( IFF_TUN | IFF_TAP ) ) = = IFF_TUN ) ? " tun " : " tap " ;
2014-01-21 11:04:26 +01:00
props - > no_pi = ! ! ( flags & IFF_NO_PI ) ;
props - > vnet_hdr = ! ! ( flags & IFF_VNET_HDR ) ;
props - > multi_queue = ! ! ( flags & IFF_MULTI_QUEUE ) ;
} else
success = FALSE ;
g_free ( val ) ;
} else
success = FALSE ;
2013-04-25 15:46:39 -04:00
2014-01-21 11:04:26 +01:00
return success ;
2013-04-25 15:46:39 -04:00
}
2015-04-23 23:16:00 +02:00
static gboolean
tun_get_properties (NMPlatform *platform, int ifindex, NMPlatformTunProperties *props)
{
	/* Convenience wrapper: resolve @ifindex to its link name and delegate. */
	return tun_get_properties_ifname (platform, nm_platform_link_get_name (platform, ifindex), props);
}
2013-05-06 09:16:17 -04:00
/* netlink attribute validation policy for IFLA_INFO_DATA of macvlan links */
static const struct nla_policy macvlan_info_policy[IFLA_MACVLAN_MAX + 1] = {
	[IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
#ifdef MACVLAN_FLAG_NOPROMISC
	[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
#endif
};
/* Parse the IFLA_INFO_DATA nested attributes of a macvlan link into
 * NMPlatformMacvlanProperties (@parser_data). Returns 0 on success,
 * a negative libnl error code otherwise. */
static int
macvlan_info_data_parser (struct nlattr *info_data, gpointer parser_data)
{
	NMPlatformMacvlanProperties *props = parser_data;
	struct nlattr *tb[IFLA_MACVLAN_MAX + 1];
	int err;

	err = nla_parse_nested (tb, IFLA_MACVLAN_MAX, info_data,
	                        (struct nla_policy *) macvlan_info_policy);
	if (err < 0)
		return err;

	switch (nla_get_u32 (tb[IFLA_MACVLAN_MODE])) {
	case MACVLAN_MODE_PRIVATE:
		props->mode = "private";
		break;
	case MACVLAN_MODE_VEPA:
		props->mode = "vepa";
		break;
	case MACVLAN_MODE_BRIDGE:
		props->mode = "bridge";
		break;
	case MACVLAN_MODE_PASSTHRU:
		props->mode = "passthru";
		break;
	default:
		return -NLE_PARSE_ERR;
	}

#ifdef MACVLAN_FLAG_NOPROMISC
	props->no_promisc = !!(nla_get_u16 (tb[IFLA_MACVLAN_FLAGS]) & MACVLAN_FLAG_NOPROMISC);
#else
	/* kernel headers too old to expose the flag; report promisc not disabled */
	props->no_promisc = FALSE;
#endif

	return 0;
}
static gboolean
macvlan_get_properties (NMPlatform *platform, int ifindex, NMPlatformMacvlanProperties *props)
{
	/* Fill @props for the macvlan link @ifindex: the parent ifindex comes from
	 * the platform cache, the mode/flags from a netlink IFLA_INFO_DATA query.
	 * Returns FALSE if the link is not cached or the netlink parse fails. */
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	int err;
	const NMPObject *obj;

	obj = cache_lookup_link (platform, ifindex);
	if (!obj)
		return FALSE;

	props->parent_ifindex = obj->link.parent;

	err = _nl_link_parse_info_data (priv->nlh, ifindex,
	                                macvlan_info_data_parser, props);
	if (err != 0) {
		warning ("(%s) could not read properties: %s",
		         obj->link.name, nl_geterror (err));
	}
	return (err == 0);
}
2013-06-04 10:31:22 -03:00
/* The installed kernel headers might not have VXLAN stuff at all, or
 * they might have the original properties, but not PORT, GROUP6, or LOCAL6.
 * So until we depend on kernel >= 3.11, we just ignore the actual enum
 * in if_link.h and define the values ourselves.
 */
#define IFLA_VXLAN_UNSPEC      0
#define IFLA_VXLAN_ID          1
#define IFLA_VXLAN_GROUP       2
#define IFLA_VXLAN_LINK        3
#define IFLA_VXLAN_LOCAL       4
#define IFLA_VXLAN_TTL         5
#define IFLA_VXLAN_TOS         6
#define IFLA_VXLAN_LEARNING    7
#define IFLA_VXLAN_AGEING      8
#define IFLA_VXLAN_LIMIT       9
#define IFLA_VXLAN_PORT_RANGE 10
#define IFLA_VXLAN_PROXY      11
#define IFLA_VXLAN_RSC        12
#define IFLA_VXLAN_L2MISS     13
#define IFLA_VXLAN_L3MISS     14
#define IFLA_VXLAN_PORT       15
#define IFLA_VXLAN_GROUP6     16
#define IFLA_VXLAN_LOCAL6     17
#undef IFLA_VXLAN_MAX
#define IFLA_VXLAN_MAX IFLA_VXLAN_LOCAL6
2014-09-25 16:44:09 +02:00
/* older kernel header might not contain 'struct ifla_vxlan_port_range'.
 * Redefine it. Layout matches the kernel's struct: two 16-bit port bounds. */
struct nm_ifla_vxlan_port_range {
	guint16 low;
	guint16 high;
};
2013-06-04 10:31:22 -03:00
static const struct nla_policy vxlan_info_policy [ IFLA_VXLAN_MAX + 1 ] = {
[ IFLA_VXLAN_ID ] = { . type = NLA_U32 } ,
[ IFLA_VXLAN_GROUP ] = { . type = NLA_U32 } ,
[ IFLA_VXLAN_GROUP6 ] = { . type = NLA_UNSPEC ,
. minlen = sizeof ( struct in6_addr ) } ,
[ IFLA_VXLAN_LINK ] = { . type = NLA_U32 } ,
[ IFLA_VXLAN_LOCAL ] = { . type = NLA_U32 } ,
[ IFLA_VXLAN_LOCAL6 ] = { . type = NLA_UNSPEC ,
. minlen = sizeof ( struct in6_addr ) } ,
[ IFLA_VXLAN_TOS ] = { . type = NLA_U8 } ,
[ IFLA_VXLAN_TTL ] = { . type = NLA_U8 } ,
[ IFLA_VXLAN_LEARNING ] = { . type = NLA_U8 } ,
[ IFLA_VXLAN_AGEING ] = { . type = NLA_U32 } ,
[ IFLA_VXLAN_LIMIT ] = { . type = NLA_U32 } ,
[ IFLA_VXLAN_PORT_RANGE ] = { . type = NLA_UNSPEC ,
2014-09-25 16:44:09 +02:00
. minlen = sizeof ( struct nm_ifla_vxlan_port_range ) } ,
2013-06-04 10:31:22 -03:00
[ IFLA_VXLAN_PROXY ] = { . type = NLA_U8 } ,
[ IFLA_VXLAN_RSC ] = { . type = NLA_U8 } ,
[ IFLA_VXLAN_L2MISS ] = { . type = NLA_U8 } ,
[ IFLA_VXLAN_L3MISS ] = { . type = NLA_U8 } ,
[ IFLA_VXLAN_PORT ] = { . type = NLA_U16 } ,
} ;
/* Parse the VXLAN IFLA_INFO_DATA blob into an NMPlatformVxlanProperties.
 * parser_data must point to the properties struct to fill; it is zeroed
 * first, so any attribute the kernel omitted reads back as 0/FALSE.
 * Returns 0 on success, or the negative libnl error from nla_parse_nested(). */
static int
vxlan_info_data_parser (struct nlattr *info_data, gpointer parser_data)
{
	NMPlatformVxlanProperties *props = parser_data;
	struct nlattr *tb[IFLA_VXLAN_MAX + 1];
	struct nm_ifla_vxlan_port_range *range;
	int nle;

	nle = nla_parse_nested (tb, IFLA_VXLAN_MAX, info_data,
	                        (struct nla_policy *) vxlan_info_policy);
	if (nle < 0)
		return nle;

	memset (props, 0, sizeof (*props));

	if (tb[IFLA_VXLAN_LINK])
		props->parent_ifindex = nla_get_u32 (tb[IFLA_VXLAN_LINK]);
	if (tb[IFLA_VXLAN_ID])
		props->id = nla_get_u32 (tb[IFLA_VXLAN_ID]);
	if (tb[IFLA_VXLAN_GROUP])
		props->group = nla_get_u32 (tb[IFLA_VXLAN_GROUP]);
	if (tb[IFLA_VXLAN_LOCAL])
		props->local = nla_get_u32 (tb[IFLA_VXLAN_LOCAL]);
	if (tb[IFLA_VXLAN_GROUP6])
		memcpy (&props->group6, nla_data (tb[IFLA_VXLAN_GROUP6]), sizeof (props->group6));
	if (tb[IFLA_VXLAN_LOCAL6])
		memcpy (&props->local6, nla_data (tb[IFLA_VXLAN_LOCAL6]), sizeof (props->local6));
	if (tb[IFLA_VXLAN_AGEING])
		props->ageing = nla_get_u32 (tb[IFLA_VXLAN_AGEING]);
	if (tb[IFLA_VXLAN_LIMIT])
		props->limit = nla_get_u32 (tb[IFLA_VXLAN_LIMIT]);
	if (tb[IFLA_VXLAN_TOS])
		props->tos = nla_get_u8 (tb[IFLA_VXLAN_TOS]);
	if (tb[IFLA_VXLAN_TTL])
		props->ttl = nla_get_u8 (tb[IFLA_VXLAN_TTL]);
	if (tb[IFLA_VXLAN_PORT])
		props->dst_port = nla_get_u16 (tb[IFLA_VXLAN_PORT]);
	if (tb[IFLA_VXLAN_PORT_RANGE]) {
		range = nla_data (tb[IFLA_VXLAN_PORT_RANGE]);
		props->src_port_min = range->low;
		props->src_port_max = range->high;
	}

	/* Normalize the u8 boolean attributes to TRUE/FALSE. */
	if (tb[IFLA_VXLAN_LEARNING])
		props->learning = !!nla_get_u8 (tb[IFLA_VXLAN_LEARNING]);
	if (tb[IFLA_VXLAN_PROXY])
		props->proxy = !!nla_get_u8 (tb[IFLA_VXLAN_PROXY]);
	if (tb[IFLA_VXLAN_RSC])
		props->rsc = !!nla_get_u8 (tb[IFLA_VXLAN_RSC]);
	if (tb[IFLA_VXLAN_L2MISS])
		props->l2miss = !!nla_get_u8 (tb[IFLA_VXLAN_L2MISS]);
	if (tb[IFLA_VXLAN_L3MISS])
		props->l3miss = !!nla_get_u8 (tb[IFLA_VXLAN_L3MISS]);

	return 0;
}
/* Read the VXLAN link properties for @ifindex into @props.
 * Returns TRUE on success; logs a warning and returns FALSE otherwise. */
static gboolean
vxlan_get_properties (NMPlatform *platform, int ifindex, NMPlatformVxlanProperties *props)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	int nle;

	nle = _nl_link_parse_info_data (priv->nlh, ifindex,
	                                vxlan_info_data_parser, props);
	if (nle != 0) {
		warning ("(%s) could not read vxlan properties: %s",
		         nm_platform_link_get_name (platform, ifindex), nl_geterror (nle));
		return FALSE;
	}
	return TRUE;
}
2013-05-21 12:49:24 -03:00
static const struct nla_policy gre_info_policy [ IFLA_GRE_MAX + 1 ] = {
2014-03-03 10:08:23 -05:00
[ IFLA_GRE_LINK ] = { . type = NLA_U32 } ,
[ IFLA_GRE_IFLAGS ] = { . type = NLA_U16 } ,
[ IFLA_GRE_OFLAGS ] = { . type = NLA_U16 } ,
[ IFLA_GRE_IKEY ] = { . type = NLA_U32 } ,
[ IFLA_GRE_OKEY ] = { . type = NLA_U32 } ,
[ IFLA_GRE_LOCAL ] = { . type = NLA_U32 } ,
[ IFLA_GRE_REMOTE ] = { . type = NLA_U32 } ,
[ IFLA_GRE_TTL ] = { . type = NLA_U8 } ,
[ IFLA_GRE_TOS ] = { . type = NLA_U8 } ,
[ IFLA_GRE_PMTUDISC ] = { . type = NLA_U8 } ,
2013-05-21 12:49:24 -03:00
} ;
/* Parse the GRE IFLA_INFO_DATA blob into an NMPlatformGreProperties.
 * parser_data must point to the properties struct to fill; it is zeroed
 * first, so any attribute the kernel omitted reads back as 0/FALSE.
 * Returns 0 on success, or the negative libnl error from nla_parse_nested().
 *
 * Fix: the previous version called nla_get_u16()/u32()/u8() on tb[] entries
 * without checking them for NULL. The policy only validates attributes that
 * are present -- it does not guarantee presence -- so a kernel that omits
 * e.g. IFLA_GRE_IFLAGS would cause a NULL-pointer dereference. Every
 * attribute is now guarded, matching vxlan_info_data_parser() above. */
static int
gre_info_data_parser (struct nlattr *info_data, gpointer parser_data)
{
	NMPlatformGreProperties *props = parser_data;
	struct nlattr *tb[IFLA_GRE_MAX + 1];
	int err;

	err = nla_parse_nested (tb, IFLA_GRE_MAX, info_data,
	                        (struct nla_policy *) gre_info_policy);
	if (err < 0)
		return err;

	memset (props, 0, sizeof (*props));

	if (tb[IFLA_GRE_LINK])
		props->parent_ifindex = nla_get_u32 (tb[IFLA_GRE_LINK]);
	if (tb[IFLA_GRE_IFLAGS])
		props->input_flags = nla_get_u16 (tb[IFLA_GRE_IFLAGS]);
	if (tb[IFLA_GRE_OFLAGS])
		props->output_flags = nla_get_u16 (tb[IFLA_GRE_OFLAGS]);
	/* The keys are only meaningful when the corresponding GRE_KEY flag is set. */
	if ((props->input_flags & GRE_KEY) && tb[IFLA_GRE_IKEY])
		props->input_key = nla_get_u32 (tb[IFLA_GRE_IKEY]);
	if ((props->output_flags & GRE_KEY) && tb[IFLA_GRE_OKEY])
		props->output_key = nla_get_u32 (tb[IFLA_GRE_OKEY]);
	if (tb[IFLA_GRE_LOCAL])
		props->local = nla_get_u32 (tb[IFLA_GRE_LOCAL]);
	if (tb[IFLA_GRE_REMOTE])
		props->remote = nla_get_u32 (tb[IFLA_GRE_REMOTE]);
	if (tb[IFLA_GRE_TOS])
		props->tos = nla_get_u8 (tb[IFLA_GRE_TOS]);
	if (tb[IFLA_GRE_TTL])
		props->ttl = nla_get_u8 (tb[IFLA_GRE_TTL]);
	if (tb[IFLA_GRE_PMTUDISC])
		props->path_mtu_discovery = !!nla_get_u8 (tb[IFLA_GRE_PMTUDISC]);

	return 0;
}
/* Read the GRE link properties for @ifindex into @props.
 * Returns TRUE on success; logs a warning and returns FALSE otherwise. */
static gboolean
gre_get_properties (NMPlatform *platform, int ifindex, NMPlatformGreProperties *props)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	int nle;

	nle = _nl_link_parse_info_data (priv->nlh, ifindex,
	                                gre_info_data_parser, props);
	if (nle != 0) {
		warning ("(%s) could not read gre properties: %s",
		         nm_platform_link_get_name (platform, ifindex), nl_geterror (nle));
		return FALSE;
	}
	return TRUE;
}
2014-02-04 14:27:03 +01:00
/* Look up (or lazily create and cache) the WifiData handle for @ifindex.
 * Only NM_LINK_TYPE_WIFI and NM_LINK_TYPE_OLPC_MESH links get a handle;
 * anything else returns NULL. The handle is owned by priv->wifi_data. */
static WifiData *
wifi_get_wifi_data (NMPlatform *platform, int ifindex)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	WifiData *wifi_data;
	const NMPlatformLink *pllink;

	wifi_data = g_hash_table_lookup (priv->wifi_data, GINT_TO_POINTER (ifindex));
	if (wifi_data)
		return wifi_data;

	pllink = nm_platform_link_get (platform, ifindex);
	if (!pllink)
		return NULL;

	switch (pllink->type) {
	case NM_LINK_TYPE_WIFI:
		wifi_data = wifi_utils_init (pllink->name, ifindex, TRUE);
		break;
	case NM_LINK_TYPE_OLPC_MESH:
		/* The kernel driver now uses nl80211, but we force use of WEXT because
		 * the cfg80211 interactions are not quite ready to support access to
		 * mesh control through nl80211 just yet.
		 */
#if HAVE_WEXT
		wifi_data = wifi_wext_init (pllink->name, ifindex, FALSE);
#endif
		break;
	default:
		break;
	}

	if (wifi_data)
		g_hash_table_insert (priv->wifi_data, GINT_TO_POINTER (ifindex), wifi_data);

	return wifi_data;
}
/* Query the Wi-Fi capabilities of @ifindex. Returns FALSE when the link has
 * no wifi data; @caps may be NULL when the caller only probes for support. */
static gboolean
wifi_get_capabilities (NMPlatform *platform, int ifindex, NMDeviceWifiCapabilities *caps)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	if (!data)
		return FALSE;
	if (caps)
		*caps = wifi_utils_get_caps (data);
	return TRUE;
}
static gboolean
2014-07-07 12:04:14 -04:00
wifi_get_bssid ( NMPlatform * platform , int ifindex , guint8 * bssid )
2014-02-04 14:27:03 +01:00
{
WifiData * wifi_data = wifi_get_wifi_data ( platform , ifindex ) ;
if ( ! wifi_data )
return FALSE ;
return wifi_utils_get_bssid ( wifi_data , bssid ) ;
}
/* Current operating frequency of @ifindex, or 0 when unavailable. */
static guint32
wifi_get_frequency (NMPlatform *platform, int ifindex)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	return data ? wifi_utils_get_freq (data) : 0;
}
/* Signal quality of @ifindex, or FALSE when unavailable.
 * NOTE(review): the return type is gboolean but wifi_utils_get_qual()'s
 * value is passed through unchanged -- presumably callers treat it as an
 * integer quality; confirm before changing the signature. */
static gboolean
wifi_get_quality (NMPlatform *platform, int ifindex)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	return data ? wifi_utils_get_qual (data) : FALSE;
}
/* Current bitrate of @ifindex, or 0 when unavailable.
 * Fix: the failure path returned the boolean constant FALSE from a guint32
 * function; use 0 explicitly (identical value, correct type/intent). */
static guint32
wifi_get_rate (NMPlatform *platform, int ifindex)
{
	WifiData *wifi_data = wifi_get_wifi_data (platform, ifindex);

	if (!wifi_data)
		return 0;
	return wifi_utils_get_rate (wifi_data);
}
/* 802.11 mode of @ifindex, or NM_802_11_MODE_UNKNOWN when unavailable. */
static NM80211Mode
wifi_get_mode (NMPlatform *platform, int ifindex)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	return data ? wifi_utils_get_mode (data) : NM_802_11_MODE_UNKNOWN;
}
/* Set the 802.11 mode of @ifindex; silently a no-op without wifi data. */
static void
wifi_set_mode (NMPlatform *platform, int ifindex, NM80211Mode mode)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	if (data)
		wifi_utils_set_mode (data, mode);
}
2014-10-23 14:19:59 -04:00
/* Set the powersave level of @ifindex; silently a no-op without wifi data. */
static void
wifi_set_powersave (NMPlatform *platform, int ifindex, guint32 powersave)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	if (data)
		wifi_utils_set_powersave (data, powersave);
}
2014-02-04 14:27:03 +01:00
/* Pick a supported frequency from @freqs for @ifindex; 0 when unavailable. */
static guint32
wifi_find_frequency (NMPlatform *platform, int ifindex, const guint32 *freqs)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	return data ? wifi_utils_find_freq (data, freqs) : 0;
}
/* Tell the wifi backend whether IP addressing is in progress on @ifindex;
 * silently a no-op without wifi data. */
static void
wifi_indicate_addressing_running (NMPlatform *platform, int ifindex, gboolean running)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	if (data)
		wifi_utils_indicate_addressing_running (data, running);
}
/* Current mesh channel of @ifindex, or 0 when unavailable. */
static guint32
mesh_get_channel (NMPlatform *platform, int ifindex)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	return data ? wifi_utils_get_mesh_channel (data) : 0;
}
/* Set the mesh channel of @ifindex; FALSE when wifi data is unavailable. */
static gboolean
mesh_set_channel (NMPlatform *platform, int ifindex, guint32 channel)
{
	WifiData *data = wifi_get_wifi_data (platform, ifindex);

	return data ? wifi_utils_set_mesh_channel (data, channel) : FALSE;
}
static gboolean
2014-06-26 10:42:11 -04:00
mesh_set_ssid ( NMPlatform * platform , int ifindex , const guint8 * ssid , gsize len )
2014-02-04 14:27:03 +01:00
{
WifiData * wifi_data = wifi_get_wifi_data ( platform , ifindex ) ;
if ( ! wifi_data )
return FALSE ;
2014-06-26 10:42:11 -04:00
return wifi_utils_set_mesh_ssid ( wifi_data , ssid , len ) ;
2014-02-04 14:27:03 +01:00
}
2014-02-05 11:56:44 +01:00
/* Whether wake-on-LAN is enabled on @ifindex. Ethernet links are queried
 * via ethtool, Wi-Fi links via the wifi backend (WoWLAN); any other link
 * type reports FALSE. */
static gboolean
link_get_wake_on_lan (NMPlatform *platform, int ifindex)
{
	switch (nm_platform_link_get_type (platform, ifindex)) {
	case NM_LINK_TYPE_ETHERNET:
		return nmp_utils_ethtool_get_wake_on_lan (nm_platform_link_get_name (platform, ifindex));
	case NM_LINK_TYPE_WIFI: {
		WifiData *data = wifi_get_wifi_data (platform, ifindex);

		return data ? wifi_utils_get_wowlan (data) : FALSE;
	}
	default:
		return FALSE;
	}
}
2014-10-03 13:41:49 -05:00
/* Query driver name, driver version and firmware version of @ifindex via
 * ethtool. The out parameters receive newly allocated strings (ownership
 * follows nmp_utils_ethtool_get_driver_info()). */
static gboolean
link_get_driver_info (NMPlatform *platform,
                      int ifindex,
                      char **out_driver_name,
                      char **out_driver_version,
                      char **out_fw_version)
{
	const char *ifname = nm_platform_link_get_name (platform, ifindex);

	return nmp_utils_ethtool_get_driver_info (ifname,
	                                          out_driver_name,
	                                          out_driver_version,
	                                          out_fw_version);
}
2013-03-27 22:23:24 +01:00
/******************************************************************/
2015-04-06 18:29:36 +02:00
static GArray *
2015-06-19 16:24:18 +02:00
ipx_address_get_all ( NMPlatform * platform , int ifindex , NMPObjectType obj_type )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-06-18 11:44:36 +02:00
2015-06-19 16:24:18 +02:00
nm_assert ( NM_IN_SET ( obj_type , NMP_OBJECT_TYPE_IP4_ADDRESS , NMP_OBJECT_TYPE_IP6_ADDRESS ) ) ;
2015-04-06 18:29:36 +02:00
return nmp_cache_lookup_multi_to_array ( priv - > cache ,
obj_type ,
2015-06-18 11:44:36 +02:00
nmp_cache_id_init_addrroute_visible_by_ifindex ( NMP_CACHE_ID_STATIC ,
obj_type ,
ifindex ) ) ;
2015-04-06 18:29:36 +02:00
}
2013-03-27 22:23:24 +01:00
/* All visible IPv4 addresses on @ifindex (see ipx_address_get_all()). */
static GArray *
ip4_address_get_all (NMPlatform *platform, int ifindex)
{
	return ipx_address_get_all (platform, ifindex, NMP_OBJECT_TYPE_IP4_ADDRESS);
}
/* All visible IPv6 addresses on @ifindex (see ipx_address_get_all()). */
static GArray *
ip6_address_get_all (NMPlatform *platform, int ifindex)
{
	return ipx_address_get_all (platform, ifindex, NMP_OBJECT_TYPE_IP6_ADDRESS);
}
2014-04-20 16:54:59 +02:00
/* 169.254.0.0/16 in network byte order. */
#define IPV4LL_NETWORK (htonl (0xA9FE0000L))
#define IPV4LL_NETMASK (htonl (0xFFFF0000L))

/* Whether @src falls inside the IPv4 link-local range 169.254.0.0/16. */
static gboolean
ip4_is_link_local (const struct in_addr *src)
{
	return (src->s_addr & IPV4LL_NETMASK) == IPV4LL_NETWORK;
}
2013-03-27 22:23:24 +01:00
static struct nl_object *
2015-04-21 11:14:27 +02:00
build_rtnl_addr ( NMPlatform * platform ,
int family ,
2013-12-02 10:20:26 -05:00
int ifindex ,
gconstpointer addr ,
gconstpointer peer_addr ,
int plen ,
guint32 lifetime ,
guint32 preferred ,
2014-02-19 16:10:59 -05:00
guint flags ,
const char * label )
2013-03-27 22:23:24 +01:00
{
2015-05-29 09:38:26 +02:00
auto_nl_object struct rtnl_addr * rtnladdr = _nl_rtnl_addr_alloc ( ifindex ) ;
2014-04-29 16:00:48 +02:00
struct rtnl_addr * rtnladdr_copy ;
2013-03-27 22:23:24 +01:00
int addrlen = family = = AF_INET ? sizeof ( in_addr_t ) : sizeof ( struct in6_addr ) ;
2015-05-29 09:38:26 +02:00
auto_nl_addr struct nl_addr * nladdr = _nl_addr_build ( family , addr , addrlen ) ;
2013-03-27 22:23:24 +01:00
int nle ;
2013-12-03 11:42:28 -06:00
/* IP address */
2013-03-27 22:23:24 +01:00
nle = rtnl_addr_set_local ( rtnladdr , nladdr ) ;
2014-04-29 16:00:48 +02:00
if ( nle ) {
error ( " build_rtnl_addr(): rtnl_addr_set_local failed with %s (%d) " , nl_geterror ( nle ) , nle ) ;
return NULL ;
}
2013-12-02 10:20:26 -05:00
2014-04-20 16:54:59 +02:00
/* Tighten scope (IPv4 only) */
if ( family = = AF_INET & & ip4_is_link_local ( addr ) )
2015-05-03 10:47:41 +02:00
rtnl_addr_set_scope ( rtnladdr , RT_SCOPE_LINK ) ;
2014-04-20 16:54:59 +02:00
2013-12-03 11:42:28 -06:00
/* IPv4 Broadcast address */
if ( family = = AF_INET ) {
2014-03-07 16:33:40 +01:00
in_addr_t bcast ;
2013-12-05 09:48:46 +01:00
auto_nl_addr struct nl_addr * bcaddr = NULL ;
2013-12-03 11:42:28 -06:00
2014-03-07 16:33:40 +01:00
bcast = * ( ( in_addr_t * ) addr ) | ~ nm_utils_ip4_prefix_to_netmask ( plen ) ;
2015-05-29 09:38:26 +02:00
bcaddr = _nl_addr_build ( family , & bcast , addrlen ) ;
2013-12-03 11:42:28 -06:00
g_assert ( bcaddr ) ;
rtnl_addr_set_broadcast ( rtnladdr , bcaddr ) ;
}
/* Peer/point-to-point address */
2013-12-02 10:20:26 -05:00
if ( peer_addr ) {
2015-05-29 09:38:26 +02:00
auto_nl_addr struct nl_addr * nlpeer = _nl_addr_build ( family , peer_addr , addrlen ) ;
2013-12-02 10:20:26 -05:00
nle = rtnl_addr_set_peer ( rtnladdr , nlpeer ) ;
2014-04-29 16:00:48 +02:00
if ( nle & & nle ! = - NLE_AF_NOSUPPORT ) {
/* IPv6 doesn't support peer addresses yet */
error ( " build_rtnl_addr(): rtnl_addr_set_peer failed with %s (%d) " , nl_geterror ( nle ) , nle ) ;
return NULL ;
}
2013-12-02 10:20:26 -05:00
}
2015-05-29 09:38:26 +02:00
_nl_rtnl_addr_set_prefixlen ( rtnladdr , plen ) ;
2015-07-14 12:51:49 +02:00
if ( ( lifetime ! = 0 & & lifetime ! = NM_PLATFORM_LIFETIME_PERMANENT )
| | ( preferred ! = 0 & & preferred ! = NM_PLATFORM_LIFETIME_PERMANENT ) ) {
2015-06-22 15:56:00 +02:00
/* note that here we set the relative timestamps (ticking from *now*). */
2013-06-29 11:30:11 +02:00
rtnl_addr_set_valid_lifetime ( rtnladdr , lifetime ) ;
rtnl_addr_set_preferred_lifetime ( rtnladdr , preferred ) ;
}
2014-04-04 16:14:46 +02:00
if ( flags ) {
2015-05-06 11:55:02 +02:00
if ( ( flags & ~ 0xFF ) & & ! _support_kernel_extended_ifa_flags_get ( ) ) {
2014-04-04 16:14:46 +02:00
/* Older kernels don't accept unknown netlink attributes.
*
* With commit libnl commit 5206 c050504f8676a24854519b9c351470fb7cc6 , libnl will only set
* the extended address flags attribute IFA_FLAGS when necessary ( > 8 bit ) . But it ' s up to
* us not to shove those extended flags on to older kernels .
*
* Just silently clear them . The kernel should ignore those unknown flags anyway . */
flags & = 0xFF ;
}
2013-10-15 20:44:59 +02:00
rtnl_addr_set_flags ( rtnladdr , flags ) ;
2014-04-04 16:14:46 +02:00
}
2014-02-19 16:10:59 -05:00
if ( label & & * label )
rtnl_addr_set_label ( rtnladdr , label ) ;
2013-03-27 22:23:24 +01:00
2014-04-29 16:00:48 +02:00
rtnladdr_copy = rtnladdr ;
rtnladdr = NULL ;
return ( struct nl_object * ) rtnladdr_copy ;
2013-03-27 22:23:24 +01:00
}
2015-04-14 23:14:06 +02:00
/* Convert a cached NMPlatformIP4Address into a libnl rtnl_addr object.
 * Lifetimes are converted from absolute timestamps to the relative values
 * libnl expects. Returns a new nl_object owned by the caller. */
struct nl_object *
_nmp_vt_cmd_plobj_to_nl_ip4_address (NMPlatform *platform, const NMPlatformObject *_obj, gboolean id_only)
{
	const NMPlatformIP4Address *obj = (const NMPlatformIP4Address *) _obj;
	guint32 lifetime, preferred;

	nmp_utils_lifetime_get (obj->timestamp, obj->lifetime, obj->preferred,
	                        0, 0, &lifetime, &preferred);

	return build_rtnl_addr (platform,
	                        AF_INET,
	                        obj->ifindex,
	                        &obj->address,
	                        obj->peer_address ? &obj->peer_address : NULL,
	                        obj->plen,
	                        lifetime,
	                        preferred,
	                        0,
	                        obj->label[0] ? obj->label : NULL);
}
/* Convert a cached NMPlatformIP6Address into a libnl rtnl_addr object.
 * An all-zero peer address means "no peer". Returns a new nl_object owned
 * by the caller. */
struct nl_object *
_nmp_vt_cmd_plobj_to_nl_ip6_address (NMPlatform *platform, const NMPlatformObject *_obj, gboolean id_only)
{
	const NMPlatformIP6Address *obj = (const NMPlatformIP6Address *) _obj;
	guint32 lifetime, preferred;

	nmp_utils_lifetime_get (obj->timestamp, obj->lifetime, obj->preferred,
	                        0, 0, &lifetime, &preferred);

	return build_rtnl_addr (platform,
	                        AF_INET6,
	                        obj->ifindex,
	                        &obj->address,
	                        !IN6_IS_ADDR_UNSPECIFIED (&obj->peer_address) ? &obj->peer_address : NULL,
	                        obj->plen,
	                        lifetime,
	                        preferred,
	                        0,
	                        NULL);
}
2013-03-27 22:23:24 +01:00
static gboolean
2013-12-02 10:20:26 -05:00
ip4_address_add ( NMPlatform * platform ,
int ifindex ,
in_addr_t addr ,
in_addr_t peer_addr ,
int plen ,
guint32 lifetime ,
2014-02-19 16:10:59 -05:00
guint32 preferred ,
const char * label )
2013-03-27 22:23:24 +01:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMPObject obj_needle ;
auto_nl_object struct nl_object * nlo = NULL ;
nlo = build_rtnl_addr ( platform , AF_INET , ifindex , & addr ,
peer_addr ? & peer_addr : NULL ,
plen , lifetime , preferred , 0 ,
label ) ;
return do_add_addrroute ( platform ,
nmp_object_stackinit_id_ip4_address ( & obj_needle , ifindex , addr , plen ) ,
nlo ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2013-12-02 10:20:26 -05:00
ip6_address_add ( NMPlatform * platform ,
int ifindex ,
struct in6_addr addr ,
struct in6_addr peer_addr ,
int plen ,
guint32 lifetime ,
guint32 preferred ,
guint flags )
2013-03-27 22:23:24 +01:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMPObject obj_needle ;
auto_nl_object struct nl_object * nlo = NULL ;
nlo = build_rtnl_addr ( platform , AF_INET6 , ifindex , & addr ,
IN6_IS_ADDR_UNSPECIFIED ( & peer_addr ) ? NULL : & peer_addr ,
plen , lifetime , preferred , flags ,
NULL ) ;
return do_add_addrroute ( platform ,
nmp_object_stackinit_id_ip6_address ( & obj_needle , ifindex , & addr , plen ) ,
nlo ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2014-08-12 01:31:00 +02:00
ip4_address_delete ( NMPlatform * platform , int ifindex , in_addr_t addr , int plen , in_addr_t peer_address )
2013-03-27 22:23:24 +01:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMPObject obj_needle ;
nmp_object_stackinit_id_ip4_address ( & obj_needle , ifindex , addr , plen ) ;
obj_needle . ip4_address . peer_address = peer_address ;
return do_delete_object ( platform , & obj_needle , NULL ) ;
2013-03-27 22:23:24 +01:00
}
/* Delete the IPv6 address addr/plen from @ifindex. Returns TRUE on success. */
static gboolean
ip6_address_delete (NMPlatform *platform, int ifindex, struct in6_addr addr, int plen)
{
	NMPObject obj_needle;

	nmp_object_stackinit_id_ip6_address (&obj_needle, ifindex, &addr, plen);
	return do_delete_object (platform, &obj_needle, NULL);
}
2015-07-14 12:37:58 +02:00
static const NMPlatformIP4Address *
ip4_address_get (NMPlatform *platform, int ifindex, in_addr_t addr, int plen)
{
	/* Look up an IPv4 address in the platform cache by its identifying
	 * tuple (ifindex, address, prefix length).
	 *
	 * Returns: the cached address if present and visible, otherwise %NULL.
	 * The returned pointer is owned by the cache; the caller must not free it. */
	NMPObject obj_needle;
	const NMPObject *obj;

	/* Stack-initialize a needle object carrying only the ID fields;
	 * the cache lookup is O(1) via hash table. */
	nmp_object_stackinit_id_ip4_address (&obj_needle, ifindex, addr, plen);
	obj = nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, &obj_needle);
	if (nmp_object_is_visible (obj))
		return &obj->ip4_address;
	return NULL;
}
2015-07-14 12:37:58 +02:00
static const NMPlatformIP6Address *
ip6_address_get (NMPlatform *platform, int ifindex, struct in6_addr addr, int plen)
{
	/* Look up an IPv6 address in the platform cache by its identifying
	 * tuple (ifindex, address, prefix length).
	 *
	 * Returns: the cached address if present and visible, otherwise %NULL.
	 * The returned pointer is owned by the cache; the caller must not free it. */
	NMPObject obj_needle;
	const NMPObject *obj;

	/* Stack-initialize a needle object carrying only the ID fields;
	 * the cache lookup is O(1) via hash table. */
	nmp_object_stackinit_id_ip6_address (&obj_needle, ifindex, &addr, plen);
	obj = nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, &obj_needle);
	if (nmp_object_is_visible (obj))
		return &obj->ip6_address;
	return NULL;
}
/******************************************************************/
2015-04-06 18:29:36 +02:00
static GArray *
2015-06-22 17:28:37 +02:00
ipx_route_get_all ( NMPlatform * platform , int ifindex , NMPObjectType obj_type , NMPlatformGetRouteFlags flags )
2015-04-06 18:29:36 +02:00
{
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2015-06-22 17:08:58 +02:00
NMPCacheId cache_id ;
const NMPlatformIPRoute * const * routes ;
GArray * array ;
const NMPClass * klass ;
gboolean with_rtprot_kernel ;
guint i , len ;
2015-06-18 11:44:36 +02:00
2015-06-19 16:24:18 +02:00
nm_assert ( NM_IN_SET ( obj_type , NMP_OBJECT_TYPE_IP4_ROUTE , NMP_OBJECT_TYPE_IP6_ROUTE ) ) ;
2015-04-06 18:29:36 +02:00
2015-06-22 17:28:37 +02:00
if ( ! NM_FLAGS_ANY ( flags , NM_PLATFORM_GET_ROUTE_FLAGS_WITH_DEFAULT | NM_PLATFORM_GET_ROUTE_FLAGS_WITH_NON_DEFAULT ) )
flags | = NM_PLATFORM_GET_ROUTE_FLAGS_WITH_DEFAULT | NM_PLATFORM_GET_ROUTE_FLAGS_WITH_NON_DEFAULT ;
2015-04-06 18:29:36 +02:00
2015-06-22 17:08:58 +02:00
klass = nmp_class_from_type ( obj_type ) ;
nmp_cache_id_init_routes_visible ( & cache_id ,
obj_type ,
NM_FLAGS_HAS ( flags , NM_PLATFORM_GET_ROUTE_FLAGS_WITH_DEFAULT ) ,
NM_FLAGS_HAS ( flags , NM_PLATFORM_GET_ROUTE_FLAGS_WITH_NON_DEFAULT ) ,
ifindex ) ;
routes = ( const NMPlatformIPRoute * const * ) nmp_cache_lookup_multi ( priv - > cache , & cache_id , & len ) ;
array = g_array_sized_new ( FALSE , FALSE , klass - > sizeof_public , len ) ;
with_rtprot_kernel = NM_FLAGS_HAS ( flags , NM_PLATFORM_GET_ROUTE_FLAGS_WITH_RTPROT_KERNEL ) ;
for ( i = 0 ; i < len ; i + + ) {
nm_assert ( NMP_OBJECT_GET_CLASS ( NMP_OBJECT_UP_CAST ( routes [ i ] ) ) = = klass ) ;
if ( with_rtprot_kernel
| | routes [ i ] - > source ! = NM_IP_CONFIG_SOURCE_RTPROT_KERNEL )
g_array_append_vals ( array , routes [ i ] , 1 ) ;
}
return array ;
2015-04-06 18:29:36 +02:00
}
2013-03-27 22:23:24 +01:00
static GArray *
2015-06-22 17:28:37 +02:00
ip4_route_get_all ( NMPlatform * platform , int ifindex , NMPlatformGetRouteFlags flags )
2013-03-27 22:23:24 +01:00
{
2015-06-22 17:28:37 +02:00
return ipx_route_get_all ( platform , ifindex , NMP_OBJECT_TYPE_IP4_ROUTE , flags ) ;
2013-03-27 22:23:24 +01:00
}
static GArray *
2015-06-22 17:28:37 +02:00
ip6_route_get_all ( NMPlatform * platform , int ifindex , NMPlatformGetRouteFlags flags )
2013-03-27 22:23:24 +01:00
{
2015-06-22 17:28:37 +02:00
return ipx_route_get_all ( platform , ifindex , NMP_OBJECT_TYPE_IP6_ROUTE , flags ) ;
2013-03-27 22:23:24 +01:00
}
2014-02-13 19:47:04 +01:00
static void
clear_host_address ( int family , const void * network , int plen , void * dst )
{
g_return_if_fail ( plen = = ( guint8 ) plen ) ;
g_return_if_fail ( network ) ;
switch ( family ) {
case AF_INET :
* ( ( in_addr_t * ) dst ) = nm_utils_ip4_address_clear_host_address ( * ( ( in_addr_t * ) network ) , plen ) ;
break ;
case AF_INET6 :
nm_utils_ip6_address_clear_host_address ( ( struct in6_addr * ) dst , ( const struct in6_addr * ) network , plen ) ;
break ;
default :
g_assert_not_reached ( ) ;
}
}
2013-03-27 22:23:24 +01:00
static struct nl_object *
2014-10-13 11:52:29 +02:00
build_rtnl_route ( int family , int ifindex , NMIPConfigSource source ,
2014-05-15 15:19:59 -04:00
gconstpointer network , int plen , gconstpointer gateway ,
2014-11-11 14:19:12 +01:00
gconstpointer pref_src ,
2014-08-28 17:25:36 +02:00
guint32 metric , guint32 mss )
2013-03-27 22:23:24 +01:00
{
2014-02-13 19:47:04 +01:00
guint32 network_clean [ 4 ] ;
2014-04-29 16:00:48 +02:00
struct rtnl_route * rtnlroute ;
struct rtnl_nexthop * nexthop ;
2013-03-27 22:23:24 +01:00
int addrlen = ( family = = AF_INET ) ? sizeof ( in_addr_t ) : sizeof ( struct in6_addr ) ;
2013-06-19 15:35:53 +02:00
/* Workaround a libnl bug by using zero destination address length for default routes */
2014-02-13 19:47:04 +01:00
auto_nl_addr struct nl_addr * dst = NULL ;
2015-05-29 09:38:26 +02:00
auto_nl_addr struct nl_addr * gw = gateway ? _nl_addr_build ( family , gateway , addrlen ) : NULL ;
auto_nl_addr struct nl_addr * pref_src_nl = pref_src ? _nl_addr_build ( family , pref_src , addrlen ) : NULL ;
2013-03-27 22:23:24 +01:00
2014-02-13 19:47:04 +01:00
/* There seem to be problems adding a route with non-zero host identifier.
* Adding IPv6 routes is simply ignored , without error message .
* In the IPv4 case , we got an error . Thus , we have to make sure , that
* the address is sane . */
clear_host_address ( family , network , plen , network_clean ) ;
2015-05-29 09:38:26 +02:00
dst = _nl_addr_build ( family , network_clean , plen ? addrlen : 0 ) ;
2013-03-27 22:23:24 +01:00
nl_addr_set_prefixlen ( dst , plen ) ;
2015-05-29 09:38:26 +02:00
rtnlroute = _nl_rtnl_route_alloc ( ) ;
2013-03-27 22:23:24 +01:00
rtnl_route_set_table ( rtnlroute , RT_TABLE_MAIN ) ;
rtnl_route_set_tos ( rtnlroute , 0 ) ;
rtnl_route_set_dst ( rtnlroute , dst ) ;
rtnl_route_set_priority ( rtnlroute , metric ) ;
2014-02-13 15:11:05 +01:00
rtnl_route_set_family ( rtnlroute , family ) ;
2015-05-29 09:55:51 +02:00
rtnl_route_set_protocol ( rtnlroute , _nm_ip_config_source_to_rtprot ( source ) ) ;
2013-03-27 22:23:24 +01:00
2015-05-29 09:38:26 +02:00
nexthop = _nl_rtnl_route_nh_alloc ( ) ;
2013-03-27 22:23:24 +01:00
rtnl_route_nh_set_ifindex ( nexthop , ifindex ) ;
2013-05-02 08:06:08 +02:00
if ( gw & & ! nl_addr_iszero ( gw ) )
2013-03-27 22:23:24 +01:00
rtnl_route_nh_set_gateway ( nexthop , gw ) ;
2014-11-11 14:19:12 +01:00
if ( pref_src_nl )
rtnl_route_set_pref_src ( rtnlroute , pref_src_nl ) ;
2013-03-27 22:23:24 +01:00
rtnl_route_add_nexthop ( rtnlroute , nexthop ) ;
if ( mss > 0 )
rtnl_route_set_metric ( rtnlroute , RTAX_ADVMSS , mss ) ;
return ( struct nl_object * ) rtnlroute ;
}
2015-04-14 23:14:06 +02:00
struct nl_object *
_nmp_vt_cmd_plobj_to_nl_ip4_route (NMPlatform *platform, const NMPlatformObject *_obj, gboolean id_only)
{
	/* Virtual-table command: convert a cached NMPlatformIP4Route into a
	 * newly allocated libnl route object (caller owns the result). */
	const NMPlatformIP4Route *obj = (const NMPlatformIP4Route *) _obj;

	return build_rtnl_route (AF_INET,
	                         obj->ifindex,
	                         obj->source,
	                         &obj->network,
	                         obj->plen,
	                         &obj->gateway,
	                         /* pref_src of 0 means "unset" and is passed as NULL. */
	                         obj->pref_src ? &obj->pref_src : NULL,
	                         obj->metric,
	                         obj->mss);
}
struct nl_object *
_nmp_vt_cmd_plobj_to_nl_ip6_route (NMPlatform *platform, const NMPlatformObject *_obj, gboolean id_only)
{
	/* Virtual-table command: convert a cached NMPlatformIP6Route into a
	 * newly allocated libnl route object (caller owns the result).
	 * IPv6 routes carry no pref_src here, hence NULL. */
	const NMPlatformIP6Route *obj = (const NMPlatformIP6Route *) _obj;

	return build_rtnl_route (AF_INET6,
	                         obj->ifindex,
	                         obj->source,
	                         &obj->network,
	                         obj->plen,
	                         &obj->gateway,
	                         NULL,
	                         obj->metric,
	                         obj->mss);
}
2013-03-27 22:23:24 +01:00
static gboolean
2014-10-13 11:52:29 +02:00
ip4_route_add ( NMPlatform * platform , int ifindex , NMIPConfigSource source ,
2014-05-15 15:19:59 -04:00
in_addr_t network , int plen , in_addr_t gateway ,
2014-11-11 14:19:12 +01:00
guint32 pref_src , guint32 metric , guint32 mss )
2013-03-27 22:23:24 +01:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMPObject obj_needle ;
auto_nl_object struct nl_object * nlo = NULL ;
nlo = build_rtnl_route ( AF_INET , ifindex , source , & network , plen , & gateway , pref_src ? & pref_src : NULL , metric , mss ) ;
return do_add_addrroute ( platform ,
nmp_object_stackinit_id_ip4_route ( & obj_needle , ifindex , network , plen , metric ) ,
nlo ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2014-10-13 11:52:29 +02:00
ip6_route_add ( NMPlatform * platform , int ifindex , NMIPConfigSource source ,
2014-05-15 15:19:59 -04:00
struct in6_addr network , int plen , struct in6_addr gateway ,
2014-08-28 17:25:36 +02:00
guint32 metric , guint32 mss )
2013-03-27 22:23:24 +01:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMPObject obj_needle ;
auto_nl_object struct nl_object * nlo = NULL ;
2014-12-22 17:06:13 +01:00
metric = nm_utils_ip6_route_metric_normalize ( metric ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
nlo = build_rtnl_route ( AF_INET6 , ifindex , source , & network , plen , & gateway , NULL , metric , mss ) ;
return do_add_addrroute ( platform ,
nmp_object_stackinit_id_ip6_route ( & obj_needle , ifindex , & network , plen , metric ) ,
nlo ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2014-08-28 17:25:36 +02:00
ip4_route_delete ( NMPlatform * platform , int ifindex , in_addr_t network , int plen , guint32 metric )
2013-03-27 22:23:24 +01:00
{
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
NMLinuxPlatformPrivate * priv = NM_LINUX_PLATFORM_GET_PRIVATE ( platform ) ;
2013-05-02 08:06:08 +02:00
in_addr_t gateway = 0 ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
auto_nl_object struct nl_object * nlo = build_rtnl_route ( AF_INET , ifindex , NM_IP_CONFIG_SOURCE_UNKNOWN , & network , plen , & gateway , NULL , metric , 0 ) ;
2014-02-23 14:57:50 +01:00
uint8_t scope = RT_SCOPE_NOWHERE ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better than O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which have many shortcomings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it loses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting to make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
const NMPObject * obj ;
NMPObject obj_needle ;
2014-02-13 15:11:05 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
g_return_val_if_fail ( nlo , FALSE ) ;
2014-02-13 15:11:05 +01:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
nmp_object_stackinit_id_ip4_route ( & obj_needle , ifindex , network , plen , metric ) ;
platform: fix lookup of routes and deletion of IPv4 routes
When doing a lookup for an libnl route, the cache comparison function
for routes takes into account 'family', 'tos', 'table', 'dst', and 'prio'.
In NetworkManager we don't use all of these properties for a route, so
at several places when doing a cache lookup we don't have all identifying
properties. Usually we only have 'family' and 'dst' ('table' is
implicit 0, because NM does currently not care about any other tables).
The problem is that NM sees routes with different 'tos', 'prio', but it
cannot look them up in the cache. Add a hack to search the cache
fuzzy.
This is similar to the hack for link, where the identifying properties
are 'family' and 'ifindex', but we only have 'ifindex' at hand. However,
contrary to this hack, we coerce the 'family' to AF_UNSPEC for every link cache
operation. This is not viable in this case, because we internally need
the 'tos' field.
We need the 'tos' field because when deleting an IPv4 route, the 'tos' field must
match. See fib_table_delete(). This was already partially fixed by commit
f0daf90298d1bd9cafac7b9c02dc905327e0b85a, but before the lookup to the
cached object would fail for any non-zero 'tos'.
Signed-off-by: Thomas Haller <thaller@redhat.com>
2014-05-28 18:46:12 +02:00
2014-12-22 17:59:16 +01:00
if ( metric = = 0 ) {
/* Deleting an IPv4 route with metric 0 does not only delete an exectly matching route.
* If no route with metric 0 exists , it might delete another route to the same destination .
* For nm_platform_ip4_route_delete ( ) we don ' t want this semantic .
*
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
* Instead , make sure that we have the most recent state and process all
* delayed actions ( including re - reading data from netlink ) . */
delayed_action_handle_all ( platform , TRUE ) ;
}
obj = nmp_cache_lookup_obj ( priv - > cache , & obj_needle ) ;
if ( metric = = 0 & & ! obj ) {
/* hmm... we are about to delete an IP4 route with metric 0. We must only
* send the delete request if such a route really exists . Above we refreshed
* the platform cache , still no such route exists .
*
* Be extra careful and reload the routes . We must be sure that such a
* route doesn ' t exists , because when we add an IPv4 address , we immediately
* afterwards try to delete the kernel - added device route with metric 0.
* It might be , that we didn ' t yet get the notification about that route .
*
* FIXME : once our ip4_address_add ( ) is sure that upon return we have
* the latest state from in the platform cache , we might save this
* additional expensive cache - resync . */
2015-06-19 16:24:18 +02:00
do_request_one_type ( platform , NMP_OBJECT_TYPE_IP4_ROUTE , TRUE ) ;
2014-12-22 17:59:16 +01:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
obj = nmp_cache_lookup_obj ( priv - > cache , & obj_needle ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( ! obj )
return TRUE ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
}
2014-05-27 21:03:41 +02:00
2014-02-23 14:57:50 +01:00
if ( ! _nl_has_capability ( 1 /* NL_CAPABILITY_ROUTE_BUILD_MSG_SET_SCOPE */ ) ) {
/* When searching for a matching IPv4 route to delete, the kernel
* searches for a matching scope , unless the RTM_DELROUTE message
* specifies RT_SCOPE_NOWHERE ( see fib_table_delete ( ) ) .
*
* However , if we set the scope of @ rtnlroute to RT_SCOPE_NOWHERE ( or
* leave it unset ) , rtnl_route_build_msg ( ) will reset the scope to
* rtnl_route_guess_scope ( ) - - which probably guesses wrong .
*
* As a workaround , we look at the cached route and use that scope .
*
* Newer versions of libnl , no longer reset the scope if explicitly set to RT_SCOPE_NOWHERE .
* So , this workaround is only needed unless we have NL_CAPABILITY_ROUTE_BUILD_MSG_SET_SCOPE .
* */
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
if ( obj )
scope = nm_platform_route_scope_inv ( obj - > ip4_route . scope_inv ) ;
2014-02-23 14:57:50 +01:00
if ( scope = = RT_SCOPE_NOWHERE ) {
/* If we would set the scope to RT_SCOPE_NOWHERE, libnl would guess the scope.
2014-05-27 21:03:41 +02:00
* But probably it will guess ' link ' because we set the next hop of the route
* to zero ( 0.0 .0 .0 ) . A better guess is ' global ' . */
2014-02-23 14:57:50 +01:00
scope = RT_SCOPE_UNIVERSE ;
}
}
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
rtnl_route_set_scope ( ( struct rtnl_route * ) nlo , scope ) ;
2013-05-02 08:06:08 +02:00
2015-04-28 12:54:23 +02:00
/* we only support routes with TOS zero. As such, delete_route() is also only able to delete
* routes with tos = = 0. build_rtnl_route ( ) already initializes tos properly . */
2014-05-27 21:03:41 +02:00
/* The following fields are also relevant when comparing the route, but the default values
* are already as we want them :
*
* type : RTN_UNICAST ( setting to zero would ignore the type , but we only want to delete RTN_UNICAST )
* pref_src : NULL
*/
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
return do_delete_object ( platform , & obj_needle , nlo ) ;
2013-03-27 22:23:24 +01:00
}
static gboolean
2014-08-28 17:25:36 +02:00
ip6_route_delete ( NMPlatform * platform , int ifindex , struct in6_addr network , int plen , guint32 metric )
2013-03-27 22:23:24 +01:00
{
2014-02-13 19:47:04 +01:00
struct in6_addr gateway = IN6ADDR_ANY_INIT ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
auto_nl_object struct nl_object * nlo = NULL ;
NMPObject obj_needle ;
2013-05-02 08:06:08 +02:00
2014-12-22 17:06:13 +01:00
metric = nm_utils_ip6_route_metric_normalize ( metric ) ;
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
nlo = build_rtnl_route ( AF_INET6 , ifindex , NM_IP_CONFIG_SOURCE_UNKNOWN , & network , plen , & gateway , NULL , metric , 0 ) ;
nmp_object_stackinit_id_ip6_route ( & obj_needle , ifindex , & network , plen , metric ) ;
return do_delete_object ( platform , & obj_needle , nlo ) ;
2013-03-27 22:23:24 +01:00
}
2015-07-14 12:37:58 +02:00
/* Look up an IPv4 route in the platform cache.
 *
 * Builds a stack-allocated needle object identifying the route by
 * (ifindex, network, plen, metric) and performs an O(1) cache lookup.
 *
 * Returns: the cached route if it exists and is visible, or %NULL.
 *          The returned pointer is owned by the cache; do not free. */
static const NMPlatformIP4Route *
ip4_route_get (NMPlatform *platform, int ifindex, in_addr_t network, int plen, guint32 metric)
{
	NMPObject obj_needle;
	const NMPObject *obj;

	nmp_object_stackinit_id_ip4_route (&obj_needle, ifindex, network, plen, metric);

	obj = nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, &obj_needle);
	if (nmp_object_is_visible (obj))
		return &obj->ip4_route;
	return NULL;
}
2015-07-14 12:37:58 +02:00
/* Look up an IPv6 route in the platform cache.
 *
 * The metric is first normalized via nm_utils_ip6_route_metric_normalize()
 * so that the lookup key matches how routes were stored.
 *
 * Returns: the cached route if it exists and is visible, or %NULL.
 *          The returned pointer is owned by the cache; do not free. */
static const NMPlatformIP6Route *
ip6_route_get (NMPlatform *platform, int ifindex, struct in6_addr network, int plen, guint32 metric)
{
	NMPObject obj_needle;
	const NMPObject *obj;

	metric = nm_utils_ip6_route_metric_normalize (metric);

	nmp_object_stackinit_id_ip6_route (&obj_needle, ifindex, &network, plen, metric);

	obj = nmp_cache_lookup_obj (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache, &obj_needle);
	if (nmp_object_is_visible (obj))
		return &obj->ip6_route;
	return NULL;
}
/******************************************************************/
2013-03-27 22:23:24 +01:00
# define EVENT_CONDITIONS ((GIOCondition) (G_IO_IN | G_IO_PRI))
# define ERROR_CONDITIONS ((GIOCondition) (G_IO_ERR | G_IO_NVAL))
# define DISCONNECT_CONDITIONS ((GIOCondition) (G_IO_HUP))
static int
2015-08-30 15:58:52 +02:00
verify_source ( struct nl_msg * msg , NMPlatform * platform )
2013-03-27 22:23:24 +01:00
{
struct ucred * creds = nlmsg_get_creds ( msg ) ;
2015-05-15 11:52:24 +02:00
if ( ! creds | | creds - > pid ) {
2013-03-27 22:23:24 +01:00
if ( creds )
2015-08-30 15:58:52 +02:00
_LOGW ( " netlink: received non-kernel message (pid %d) " , creds - > pid ) ;
2013-03-27 22:23:24 +01:00
else
2015-08-30 15:58:52 +02:00
_LOGW ( " netlink: received message without credentials " ) ;
2013-03-27 22:23:24 +01:00
return NL_STOP ;
}
return NL_OK ;
}
static gboolean
event_handler ( GIOChannel * channel ,
2014-03-04 18:07:05 -05:00
GIOCondition io_condition ,
gpointer user_data )
2013-03-27 22:23:24 +01:00
{
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
delayed_action_handle_all ( NM_PLATFORM ( user_data ) , TRUE ) ;
return TRUE ;
}
/* Perform one non-blocking read pass on the netlink event socket.
 *
 * Returns: TRUE if the caller should try reading again, FALSE when the
 *          socket is drained (-NLE_AGAIN).
 *
 * On -NLE_NOMEM the kernel-side receive buffer overflowed and events were
 * lost: the socket is flushed and a full refresh of links, addresses and
 * routes is scheduled to resynchronize the cache. */
static gboolean
event_handler_read_netlink_one (NMPlatform *platform)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	int nle;

	nle = nl_recvmsgs_default (priv->nlh_event);

	/* Work around a libnl bug fixed in 3.2.22 (375a6294): a zero return
	 * with EAGAIN/EWOULDBLOCK set actually means "nothing to read". */
	if (nle == 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		nle = -NLE_AGAIN;

	if (nle < 0)
		switch (nle) {
		case -NLE_AGAIN:
			/* Socket drained. */
			return FALSE;
		case -NLE_DUMP_INTR:
			debug ("Uncritical failure to retrieve incoming events: %s (%d)", nl_geterror (nle), nle);
			break;
		case -NLE_NOMEM:
			info ("Too many netlink events. Need to resynchronize platform cache");
			/* Drain the event queue, we've lost events and are out of sync anyway and we'd
			 * like to free up some space. We'll read in the status synchronously. */
			_nl_sock_flush_data (priv->nlh_event);
			priv->nlh_seq_expect = 0;
			delayed_action_schedule (platform,
			                         DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
			                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
			                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
			                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
			                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
			                         NULL);
			break;
		default:
			error ("Failed to retrieve incoming events: %s (%d)", nl_geterror (nle), nle);
			break;
		}
	return TRUE;
}
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
/* Read all pending netlink events, optionally waiting for an outstanding ACK.
 *
 * @wait_for_acks: if TRUE and priv->nlh_seq_expect is set, poll() the event
 *   socket (up to 250 ms per sequence number) until the expected ACK arrives
 *   or the timeout expires.
 *
 * Returns: TRUE if any message was processed. */
static gboolean
event_handler_read_netlink_all (NMPlatform *platform, gboolean wait_for_acks)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	int r;
	struct pollfd pfd;
	gboolean any = FALSE;
	gint64 timestamp = 0, now;
	const int TIMEOUT = 250;
	int timeout = 0;
	guint32 wait_for_seq = 0;

	while (TRUE) {
		while (event_handler_read_netlink_one (platform))
			any = TRUE;

		if (!wait_for_acks || priv->nlh_seq_expect == 0) {
			/* Fix: in this branch priv->nlh_seq_expect is necessarily 0
			 * whenever wait_for_seq is non-zero, so logging it always
			 * printed 0. Log the sequence number we were waiting for. */
			if (wait_for_seq)
				_LOGT ("read-netlink-all: ACK for sequence number %u received", wait_for_seq);
			return any;
		}

		now = nm_utils_get_monotonic_timestamp_ms ();
		if (wait_for_seq != priv->nlh_seq_expect) {
			/* We are waiting for a new sequence number (or we will wait for the first time).
			 * Reset/start counting the overall wait time. */
			_LOGT ("read-netlink-all: wait for ACK for sequence number %u...", priv->nlh_seq_expect);
			wait_for_seq = priv->nlh_seq_expect;
			timestamp = now;
			timeout = TIMEOUT;
		} else {
			if ((now - timestamp) >= TIMEOUT) {
				/* timeout. Don't wait for this sequence number anymore. */
				break;
			}
			/* readjust the wait-time. */
			timeout = TIMEOUT - (now - timestamp);
		}

		memset (&pfd, 0, sizeof (pfd));
		pfd.fd = nl_socket_get_fd (priv->nlh_event);
		pfd.events = POLLIN;
		r = poll (&pfd, 1, timeout);

		if (r == 0) {
			/* timeout. */
			break;
		}
		if (r < 0) {
			int errsv = errno;

			if (errsv != EINTR) {
				_LOGE ("read-netlink-all: poll failed with %s", strerror (errsv));
				return any;
			}
			/* Continue to read again, even if there might be nothing to read after EINTR. */
		}
	}

	_LOGW ("read-netlink-all: timeout waiting for ACK to sequence number %u...", wait_for_seq);
	priv->nlh_seq_expect = 0;
	return any;
}
2013-03-27 22:23:24 +01:00
static struct nl_sock *
2015-08-30 15:58:52 +02:00
setup_socket ( NMPlatform * platform , gboolean event )
2013-03-27 22:23:24 +01:00
{
struct nl_sock * sock ;
int nle ;
sock = nl_socket_alloc ( ) ;
g_return_val_if_fail ( sock , NULL ) ;
/* Only ever accept messages from kernel */
2015-08-30 15:58:52 +02:00
nle = nl_socket_modify_cb ( sock , NL_CB_MSG_IN , NL_CB_CUSTOM , ( nl_recvmsg_msg_cb_t ) verify_source , platform ) ;
2013-03-27 22:23:24 +01:00
g_assert ( ! nle ) ;
/* Dispatch event messages (event socket only) */
if ( event ) {
2015-08-30 15:58:52 +02:00
nl_socket_modify_cb ( sock , NL_CB_VALID , NL_CB_CUSTOM , event_notification , platform ) ;
nl_socket_modify_cb ( sock , NL_CB_SEQ_CHECK , NL_CB_CUSTOM , event_seq_check , platform ) ;
nl_socket_modify_err_cb ( sock , NL_CB_CUSTOM , event_err , platform ) ;
2013-03-27 22:23:24 +01:00
}
nle = nl_connect ( sock , NETLINK_ROUTE ) ;
g_assert ( ! nle ) ;
nle = nl_socket_set_passcred ( sock , 1 ) ;
g_assert ( ! nle ) ;
2014-12-08 18:08:42 +01:00
/* No blocking for event socket, so that we can drain it safely. */
if ( event ) {
nle = nl_socket_set_nonblocking ( sock ) ;
g_assert ( ! nle ) ;
}
2013-03-27 22:23:24 +01:00
return sock ;
}
/******************************************************************/
2015-04-06 18:29:36 +02:00
/* Attach (or detach, with a NULL @udev_device) udev information to the
 * cached link with @ifindex, and emit the resulting platform signal. */
static void
cache_update_link_udev (NMPlatform *platform, int ifindex, GUdevDevice *udev_device)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	auto_nmp_obj NMPObject *obj_cache = NULL;
	gboolean was_visible;
	NMPCacheOpsType cache_op;

	cache_op = nmp_cache_update_link_udev (priv->cache, ifindex, udev_device, &obj_cache, &was_visible, cache_pre_hook, platform);
	do_emit_signal (platform, obj_cache, cache_op, was_visible, NM_PLATFORM_REASON_INTERNAL);
}
2013-05-29 12:00:50 -03:00
/* Handle a udev "add" (or "move") event for a network interface.
 *
 * Validates that the device exposes a name, a positive IFINDEX property and
 * a sysfs path; invalid or incomplete devices are logged and ignored.
 * On success, the udev information is merged into the platform cache. */
static void
udev_device_added (NMPlatform *platform,
                   GUdevDevice *udev_device)
{
	const char *ifname;
	int ifindex;

	ifname = g_udev_device_get_name (udev_device);
	if (!ifname) {
		debug ("udev-add: failed to get device's interface");
		return;
	}

	if (g_udev_device_get_property (udev_device, "IFINDEX"))
		ifindex = g_udev_device_get_property_as_int (udev_device, "IFINDEX");
	else {
		warning ("(%s): udev-add: failed to get device's ifindex", ifname);
		return;
	}
	if (ifindex <= 0) {
		warning ("(%s): udev-add: retrieved invalid IFINDEX=%d", ifname, ifindex);
		return;
	}

	if (!g_udev_device_get_sysfs_path (udev_device)) {
		debug ("(%s): udev-add: couldn't determine device path; ignoring...", ifname);
		return;
	}

	cache_update_link_udev (platform, ifindex, udev_device);
}
2014-04-17 14:57:55 +02:00
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better than O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many shortcomings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it loses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting to make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
/* Predicate for nmp_cache_lookup_link_full(): TRUE iff the cached link
 * object references exactly the GUdevDevice passed as @udev_device. */
static gboolean
_udev_device_removed_match_link (const NMPObject *obj, gpointer udev_device)
{
	gboolean is_same_device = (obj->_link.udev.device == udev_device);

	return is_same_device;
}
/* Handle a udev "remove" event: figure out the ifindex of the removed
 * device and detach the GUdevDevice from the corresponding cached link. */
static void
udev_device_removed (NMPlatform *platform,
                     GUdevDevice *udev_device)
{
	int ifindex = 0;

	/* Prefer the IFINDEX property from udev. When udev does not provide
	 * it, fall back to scanning the platform cache for the link that
	 * holds a reference to this very GUdevDevice instance. */
	if (g_udev_device_get_property (udev_device, "IFINDEX") != NULL)
		ifindex = g_udev_device_get_property_as_int (udev_device, "IFINDEX");
	else {
		const NMPObject *cached_link;

		cached_link = nmp_cache_lookup_link_full (NM_LINUX_PLATFORM_GET_PRIVATE (platform)->cache,
		                                          0, NULL, FALSE, NM_LINK_TYPE_NONE,
		                                          _udev_device_removed_match_link, udev_device);
		if (cached_link != NULL)
			ifindex = cached_link->link.ifindex;
	}

	debug ("udev-remove: IFINDEX=%d", ifindex);

	if (ifindex <= 0)
		return;

	/* Clear the udev-device association of the cached link. */
	cache_update_link_udev (platform, ifindex, NULL);
}
/* GUdevClient "uevent" signal handler: dispatches add/move/remove events
 * for devices of the "net" subsystem to the platform layer. */
static void
handle_udev_event (GUdevClient *client,
                   const char *action,
                   GUdevDevice *udev_device,
                   gpointer user_data)
{
	NMPlatform *platform = NM_PLATFORM (user_data);
	guint64 seqnum;
	const char *ifindex;
	const char *subsys;

	g_return_if_fail (action != NULL);

	/* A bit paranoid: we only subscribed to "net". */
	subsys = g_udev_device_get_subsystem (udev_device);
	g_return_if_fail (g_strcmp0 (subsys, "net") == 0);

	seqnum = g_udev_device_get_seqnum (udev_device);
	ifindex = g_udev_device_get_property (udev_device, "IFINDEX");
	debug ("UDEV event: action '%s' subsys '%s' device '%s' (%s); seqnum=%" G_GUINT64_FORMAT,
	       action, subsys, g_udev_device_get_name (udev_device),
	       ifindex ? ifindex : "unknown", seqnum);

	if (   strcmp (action, "add") == 0
	    || strcmp (action, "move") == 0)
		udev_device_added (platform, udev_device);
	else if (strcmp (action, "remove") == 0)
		udev_device_removed (platform, udev_device);
}
/******************************************************************/
2013-03-27 22:23:24 +01:00
static void
2015-05-10 09:16:31 +02:00
nm_linux_platform_init ( NMLinuxPlatform * self )
2013-03-27 22:23:24 +01:00
{
2015-05-10 09:16:31 +02:00
NMLinuxPlatformPrivate * priv = G_TYPE_INSTANCE_GET_PRIVATE ( self , NM_TYPE_LINUX_PLATFORM , NMLinuxPlatformPrivate ) ;
self - > priv = priv ;
2015-04-06 18:29:36 +02:00
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > delayed_deletion = g_hash_table_new_full ( ( GHashFunc ) nmp_object_id_hash ,
( GEqualFunc ) nmp_object_id_equal ,
( GDestroyNotify ) nmp_object_unref ,
( GDestroyNotify ) nmp_object_unref ) ;
2015-04-06 18:29:36 +02:00
priv - > cache = nmp_cache_new ( ) ;
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
priv - > delayed_action . list_master_connected = g_ptr_array_new ( ) ;
priv - > delayed_action . list_refresh_link = g_ptr_array_new ( ) ;
2015-05-12 07:14:55 +02:00
priv - > wifi_data = g_hash_table_new_full ( NULL , NULL , NULL , ( GDestroyNotify ) wifi_utils_deinit ) ;
2013-03-27 22:23:24 +01:00
}
2015-04-18 12:53:45 +02:00
/* GObject constructed(): bring up both netlink sockets (synchronous
 * requests + asynchronous events), hook the event socket into the main
 * loop, start udev monitoring, then populate the platform cache and
 * process the already-present devices. */
static void
constructed (GObject *_object)
{
	NMPlatform *platform = NM_PLATFORM (_object);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	const char *udev_subsys[] = { "net", NULL };
	GUdevEnumerator *enumerator;
	GList *devices, *l;
	gboolean success;
	int flags;
	int nle;

	_LOGD ("create");

	/* Netlink socket for synchronous requests. */
	priv->nlh = setup_socket (platform, FALSE);
	g_assert (priv->nlh);
	debug ("Netlink socket for requests established: port=%u, fd=%d", nl_socket_get_local_port (priv->nlh), nl_socket_get_fd (priv->nlh));

	/* Netlink socket for asynchronous events. */
	priv->nlh_event = setup_socket (platform, TRUE);
	g_assert (priv->nlh_event);
	/* The default buffer size wasn't enough for the testsuites. It might
	 * just as well happen with NetworkManager itself. For now let's hope
	 * 128KB is good enough. */
	nle = nl_socket_set_buffer_size (priv->nlh_event, 131072, 0);
	g_assert (!nle);
	nle = nl_socket_add_memberships (priv->nlh_event,
	                                 RTNLGRP_LINK,
	                                 RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR,
	                                 RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE,
	                                 0);
	g_assert (!nle);
	debug ("Netlink socket for events established: port=%u, fd=%d", nl_socket_get_local_port (priv->nlh_event), nl_socket_get_fd (priv->nlh_event));

	/* Wrap the event socket's fd in a non-blocking GIOChannel and watch it. */
	priv->event_channel = g_io_channel_unix_new (nl_socket_get_fd (priv->nlh_event));
	g_io_channel_set_encoding (priv->event_channel, NULL, NULL);
	g_io_channel_set_close_on_unref (priv->event_channel, TRUE);

	flags = g_io_channel_get_flags (priv->event_channel);
	success = g_io_channel_set_flags (priv->event_channel,
	                                  flags | G_IO_FLAG_NONBLOCK, NULL);
	g_assert (success);
	priv->event_id = g_io_add_watch (priv->event_channel,
	                                 (EVENT_CONDITIONS | ERROR_CONDITIONS | DISCONNECT_CONDITIONS),
	                                 event_handler, platform);

	/* Set up udev monitoring */
	priv->udev_client = g_udev_client_new (udev_subsys);
	g_signal_connect (priv->udev_client, "uevent", G_CALLBACK (handle_udev_event), platform);

	/* complete construction of the GObject instance before populating the cache. */
	G_OBJECT_CLASS (nm_linux_platform_parent_class)->constructed (_object);

	_LOGD ("populate platform cache");
	delayed_action_schedule (platform,
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
	                         NULL);
	delayed_action_handle_all (platform, FALSE);

	/* And read initial device list */
	enumerator = g_udev_enumerator_new (priv->udev_client);
	g_udev_enumerator_add_match_subsystem (enumerator, "net");
	g_udev_enumerator_add_match_is_initialized (enumerator);
	devices = g_udev_enumerator_execute (enumerator);
	for (l = devices; l; l = g_list_next (l)) {
		udev_device_added (platform, G_UDEV_DEVICE (l->data));
		g_object_unref (G_UDEV_DEVICE (l->data));
	}
	g_list_free (devices);
	g_object_unref (enumerator);
}
2015-04-14 16:39:51 +02:00
/* GObject dispose(): cancel all pending delayed actions and drop the
 * hash tables that may reference other objects, then chain up. */
static void
dispose (GObject *object)
{
	NMPlatform *platform = NM_PLATFORM (object);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);

	_LOGD ("dispose");

	/* Discard everything still queued for delayed handling. */
	priv->delayed_action.flags = DELAYED_ACTION_TYPE_NONE;
	g_ptr_array_set_size (priv->delayed_action.list_master_connected, 0);
	g_ptr_array_set_size (priv->delayed_action.list_refresh_link, 0);
	nm_clear_g_source (&priv->delayed_action.idle_id);

	g_clear_pointer (&priv->prune_candidates, g_hash_table_unref);
	g_clear_pointer (&priv->delayed_deletion, g_hash_table_unref);

	G_OBJECT_CLASS (nm_linux_platform_parent_class)->dispose (object);
}
2013-03-27 22:23:24 +01:00
/* GObject finalize(): release the cache, the delayed-action queues and
 * all netlink/udev resources, then chain up. */
static void
nm_linux_platform_finalize (GObject *object)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (object);

	nmp_cache_free (priv->cache);

	g_ptr_array_unref (priv->delayed_action.list_master_connected);
	g_ptr_array_unref (priv->delayed_action.list_refresh_link);

	/* Free netlink resources */
	g_source_remove (priv->event_id);
	g_io_channel_unref (priv->event_channel);
	nl_socket_free (priv->nlh);
	nl_socket_free (priv->nlh_event);

	g_object_unref (priv->udev_client);

	g_hash_table_unref (priv->wifi_data);

	/* g_clear_pointer() handles the NULL case for us, consistent with
	 * the cleanup style used in dispose(). */
	g_clear_pointer (&priv->sysctl_get_prev_values, g_hash_table_destroy);

	G_OBJECT_CLASS (nm_linux_platform_parent_class)->finalize (object);
}
/* Convenience macro for nm_linux_platform_class_init(): assigns the
 * file-local implementation to the NMPlatformClass vtable entry of the
 * same name. */
# define OVERRIDE(function) platform_class->function = function
static void
nm_linux_platform_class_init (NMLinuxPlatformClass *klass)
{
	GObjectClass *object_class = G_OBJECT_CLASS (klass);
	NMPlatformClass *platform_class = NM_PLATFORM_CLASS (klass);

	/* Attach the private-data struct to every instance of this type. */
	g_type_class_add_private (klass, sizeof (NMLinuxPlatformPrivate));

	/* GObject lifecycle hooks */
	object_class->constructed = constructed;
	object_class->dispose = dispose;
	object_class->finalize = nm_linux_platform_finalize;

	/* sysctl access */
	platform_class->sysctl_set = sysctl_set;
	platform_class->sysctl_get = sysctl_get;

	/* link lookup and enumeration */
	platform_class->link_get = _nm_platform_link_get;
	platform_class->link_get_by_ifname = _nm_platform_link_get_by_ifname;
	platform_class->link_get_by_address = _nm_platform_link_get_by_address;
	platform_class->link_get_all = link_get_all;

	/* link creation/removal and basic state */
	platform_class->link_add = link_add;
	platform_class->link_delete = link_delete;
	platform_class->link_get_type_name = link_get_type_name;
	platform_class->link_get_unmanaged = link_get_unmanaged;
	platform_class->link_refresh = link_refresh;
	platform_class->link_set_up = link_set_up;
	platform_class->link_set_down = link_set_down;
	platform_class->link_set_arp = link_set_arp;
	platform_class->link_set_noarp = link_set_noarp;

	/* udev-backed link information */
	platform_class->link_get_udi = link_get_udi;
	platform_class->link_get_udev_device = link_get_udev_device;

	/* link properties */
	platform_class->link_set_user_ipv6ll_enabled = link_set_user_ipv6ll_enabled;
	platform_class->link_set_address = link_set_address;
	platform_class->link_get_permanent_address = link_get_permanent_address;
	platform_class->link_set_mtu = link_set_mtu;
	platform_class->link_get_physical_port_id = link_get_physical_port_id;
	platform_class->link_get_dev_id = link_get_dev_id;
	platform_class->link_get_wake_on_lan = link_get_wake_on_lan;
	platform_class->link_get_driver_info = link_get_driver_info;
	platform_class->link_supports_carrier_detect = link_supports_carrier_detect;
	platform_class->link_supports_vlans = link_supports_vlans;

	/* master/slave handling */
	platform_class->link_enslave = link_enslave;
	platform_class->link_release = link_release;
	platform_class->master_set_option = master_set_option;
	platform_class->master_get_option = master_get_option;
	platform_class->slave_set_option = slave_set_option;
	platform_class->slave_get_option = slave_get_option;

	/* VLAN */
	platform_class->vlan_add = vlan_add;
	platform_class->vlan_get_info = vlan_get_info;
	platform_class->vlan_set_ingress_map = vlan_set_ingress_map;
	platform_class->vlan_set_egress_map = vlan_set_egress_map;

	/* InfiniBand */
	platform_class->infiniband_partition_add = infiniband_partition_add;
	platform_class->infiniband_get_info = infiniband_get_info;

	/* per-device-type property getters */
	platform_class->veth_get_properties = veth_get_properties;
	platform_class->tun_get_properties = tun_get_properties;
	platform_class->macvlan_get_properties = macvlan_get_properties;
	platform_class->vxlan_get_properties = vxlan_get_properties;
	platform_class->gre_get_properties = gre_get_properties;

	/* Wi-Fi */
	platform_class->wifi_get_capabilities = wifi_get_capabilities;
	platform_class->wifi_get_bssid = wifi_get_bssid;
	platform_class->wifi_get_frequency = wifi_get_frequency;
	platform_class->wifi_get_quality = wifi_get_quality;
	platform_class->wifi_get_rate = wifi_get_rate;
	platform_class->wifi_get_mode = wifi_get_mode;
	platform_class->wifi_set_mode = wifi_set_mode;
	platform_class->wifi_set_powersave = wifi_set_powersave;
	platform_class->wifi_find_frequency = wifi_find_frequency;
	platform_class->wifi_indicate_addressing_running = wifi_indicate_addressing_running;

	/* mesh */
	platform_class->mesh_get_channel = mesh_get_channel;
	platform_class->mesh_set_channel = mesh_set_channel;
	platform_class->mesh_set_ssid = mesh_set_ssid;

	/* IP addresses */
	platform_class->ip4_address_get = ip4_address_get;
	platform_class->ip6_address_get = ip6_address_get;
	platform_class->ip4_address_get_all = ip4_address_get_all;
	platform_class->ip6_address_get_all = ip6_address_get_all;
	platform_class->ip4_address_add = ip4_address_add;
	platform_class->ip6_address_add = ip6_address_add;
	platform_class->ip4_address_delete = ip4_address_delete;
	platform_class->ip6_address_delete = ip6_address_delete;

	/* IP routes */
	platform_class->ip4_route_get = ip4_route_get;
	platform_class->ip6_route_get = ip6_route_get;
	platform_class->ip4_route_get_all = ip4_route_get_all;
	platform_class->ip6_route_get_all = ip6_route_get_all;
	platform_class->ip4_route_add = ip4_route_add;
	platform_class->ip6_route_add = ip6_route_add;
	platform_class->ip4_route_delete = ip4_route_delete;
	platform_class->ip6_route_delete = ip6_route_delete;

	/* kernel-feature probing and event dispatch */
	platform_class->check_support_kernel_extended_ifa_flags = check_support_kernel_extended_ifa_flags;
	platform_class->check_support_user_ipv6ll = check_support_user_ipv6ll;
	platform_class->process_events = process_events;
}
2015-04-06 18:29:36 +02:00