#!/usr/bin/env python
# coding: utf-8

from __future__ import print_function

###############################################################################
#
# This test starts the NetworkManager stub service in a user D-Bus session
# and runs nmcli against it. The output is recorded and compared to a pre-generated
# expected output (src/tests/client/test-client.check-on-disk/*.expected) which
# is also committed to git.
#
###############################################################################
#
# HOWTO: Regenerate output
#
# When adjusting the tests, or when making changes to nmcli that intentionally
# change the output, the expected output must be regenerated.
#
# For that, you'd set up your system correctly (see SETUP below) and then simply:
#
#  $ NM_TEST_REGENERATE=1 make check-local-tests-client
#    # Or `NM_TEST_REGENERATE=1 make check -j 10`
#  $ git diff ... ; git add ...
#    # The previous step regenerated the expected output. Review the changes
#    # and consider whether they are correct. Then commit the changes to git.
#
# With meson, you can do:
#  $ meson -Ddocs=true --prefix=/tmp/nm1 build
#  $ ninja -C build
#  $ ninja -C build install
#  $ NM_TEST_REGENERATE=1 ninja -C build test
#
# Beware that you need to install the build, and take care to choose a prefix
# that doesn't mess up your system (see SETUP below).
#
# SETUP: For regenerating the output, the translations must work. First
# test whether the following works:
#
#  1) LANG=pl_PL.UTF-8 /usr/bin/nmcli --version
#     # Ensure that Polish output works for the system-installed nmcli.
#     # If not, you should ensure that `locale -a` reports the Polish
#     # locale. If that is not the case, how to enable the locale depends on
#     # your distro.
#     #
#     # On Debian, you might do:
#     #   sed -i 's/^# \(pl_PL.UTF-8 .*\)$/\1/p' /etc/locale.gen
#     #   locale-gen pl_PL.UTF-8
#     # On Fedora, you might install the `glibc-langpack-pl` package.
#
#  2) LANG=pl_PL.UTF-8 ./src/nmcli/nmcli --version
#     # Ensure that the built nmcli has the Polish locale working. If not,
#     # you probably need to first `make install` the application at the
#     # correct prefix. Take care to configure the build with the desired
#     # prefix, like `./configure --prefix=/opt/tmp`. Usually, you want to avoid
#     # using /usr as prefix, because that might overwrite files from your
#     # package management system.
#
###############################################################################
#
# Environment variables to configure the test:

# (optional) The build dir, mainly used to find the nmcli binary (in case
# ENV_NM_TEST_CLIENT_NMCLI_PATH is not set).
ENV_NM_TEST_CLIENT_BUILDDIR = "NM_TEST_CLIENT_BUILDDIR"

# (optional) Path to nmcli. By default, it looks for nmcli in the build dir.
# In particular, this lets you test an nmcli binary installed somewhere else.
ENV_NM_TEST_CLIENT_NMCLI_PATH = "NM_TEST_CLIENT_NMCLI_PATH"

# (optional) Path to nm-cloud-setup. By default, it looks for nm-cloud-setup
# in the build dir.
ENV_NM_TEST_CLIENT_CLOUD_SETUP_PATH = "NM_TEST_CLIENT_CLOUD_SETUP_PATH"

# (optional) The test also compares translated output (l10n). This requires
# that you first install the translations in the right place. So, by default,
# if a test for a translation fails, it will mark the test as skipped, and not
# fail the tests, under the assumption that the test cannot succeed currently.
# By setting NM_TEST_CLIENT_CHECK_L10N=1, you can force a failure of the test.
ENV_NM_TEST_CLIENT_CHECK_L10N = "NM_TEST_CLIENT_CHECK_L10N"

# Regenerate the .expected files. Instead of asserting, rewrite the files
# on disk with the expected output.
ENV_NM_TEST_REGENERATE = "NM_TEST_REGENERATE"

# Whether the file location should include the line number. That is useful
# only for debugging, to correlate the expected output with the test.
# Obviously, since the expected output is committed to git without line numbers,
# you'd have to first NM_TEST_REGENERATE the expected test data with line
# numbers enabled.
ENV_NM_TEST_WITH_LINENO = "NM_TEST_WITH_LINENO"

ENV_NM_TEST_ASAN_OPTIONS = "NM_TEST_ASAN_OPTIONS"
ENV_NM_TEST_LSAN_OPTIONS = "NM_TEST_LSAN_OPTIONS"
ENV_NM_TEST_UBSAN_OPTIONS = "NM_TEST_UBSAN_OPTIONS"

# Run nmcli under valgrind. If unset, we honor NMTST_USE_VALGRIND instead.
# Valgrind is always disabled if NM_TEST_REGENERATE is enabled.
ENV_NM_TEST_VALGRIND = "NM_TEST_VALGRIND"

ENV_LIBTOOL = "LIBTOOL"
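
# Example invocation (illustrative only; the builddir path is an assumption,
# and the test is normally driven through `make check` or `ninja test` as
# described above):
#
#   $ NM_TEST_CLIENT_BUILDDIR=./build \
#     NM_TEST_REGENERATE=1 \
#     python3 src/tests/client/test-client.py
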
###############################################################################

import collections
import dbus
import dbus.mainloop.glib
import dbus.service
import errno
import fcntl
import io
import itertools
import os
import random
import re
import shlex
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest

import gi

try:
    from gi.repository import GLib
except ImportError:
    GLib = None

try:
    gi.require_version("NM", "1.0")
except ValueError:
    NM = None
else:
    try:
        from gi.repository import NM
    except ImportError:
        NM = None

try:
    import pexpect
except ImportError:
    pexpect = None

try:
    from http.server import HTTPServer
    from http.server import BaseHTTPRequestHandler
except ImportError:
    HTTPServer = None


###############################################################################


class PathConfiguration:
    @staticmethod
    def srcdir():
        # This is the directory where the test script itself lies.
        # Based on this directory, we find other parts that we expect
        # in the source repository.
        return os.path.dirname(os.path.abspath(__file__))

    @staticmethod
    def top_srcdir():
        return os.path.abspath(PathConfiguration.srcdir() + "/../../..")

    @staticmethod
    def test_networkmanager_service_path():
        v = os.path.abspath(
            PathConfiguration.top_srcdir() + "/tools/test-networkmanager-service.py"
        )
        assert os.path.exists(v), 'Cannot find test server at "%s"' % (v)
        return v

    @staticmethod
    def test_cloud_meta_mock_path():
        v = os.path.abspath(
            PathConfiguration.top_srcdir() + "/tools/test-cloud-meta-mock.py"
        )
        assert os.path.exists(v), 'Cannot find cloud metadata mock server at "%s"' % (v)
        return v

    @staticmethod
    def canonical_script_filename():
        p = "src/tests/client/test-client.py"
        assert (PathConfiguration.top_srcdir() + "/" + p) == os.path.abspath(__file__)
        return p


###############################################################################


dbus_session_inited = False

_DEFAULT_ARG = object()
_UNSTABLE_OUTPUT = object()


###############################################################################


class Util:
    _signal_no_lookup = {
        1: "SIGHUP",
        2: "SIGINT",
        3: "SIGQUIT",
        4: "SIGILL",
        5: "SIGTRAP",
        6: "SIGABRT",
        8: "SIGFPE",
        9: "SIGKILL",
        11: "SIGSEGV",
        12: "SIGSYS",
        13: "SIGPIPE",
        14: "SIGALRM",
        15: "SIGTERM",
        16: "SIGURG",
        17: "SIGSTOP",
        18: "SIGTSTP",
        19: "SIGCONT",
        20: "SIGCHLD",
        21: "SIGTTIN",
        22: "SIGTTOU",
        23: "SIGPOLL",
        24: "SIGXCPU",
        25: "SIGXFSZ",
        26: "SIGVTALRM",
        27: "SIGPROF",
        30: "SIGUSR1",
        31: "SIGUSR2",
    }

    @classmethod
    def signal_no_to_str(cls, sig):
        s = cls._signal_no_lookup.get(sig, None)
        if s is None:
            return "<unknown %d>" % (sig)
        return s

    @staticmethod
    def python_has_version(major, minor=0):
        return sys.version_info[0] > major or (
            sys.version_info[0] == major and sys.version_info[1] >= minor
        )

    @staticmethod
    def is_string(s):
        if Util.python_has_version(3):
            t = str
        else:
            t = basestring
        return isinstance(s, t)

    @staticmethod
    def is_bool(s, defval=False):
        if s is None:
            return defval
        if isinstance(s, int):
            return s != 0
        if isinstance(s, str):
            if s.lower() in ["1", "y", "yes", "true", "on"]:
                return True
            if s.lower() in ["0", "n", "no", "false", "off"]:
                return False
        raise ValueError('Argument "%s" is not a boolean' % (s,))

    @staticmethod
    def as_bytes(s):
        if Util.is_string(s):
            return s.encode("utf-8")
        assert isinstance(s, bytes)
        return s

    @staticmethod
    def memoize_nullary(nullary_func):
        result = []

        def closure():
            if not result:
                result.append(nullary_func())
            return result[0]

        return closure
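
    # Illustrative usage of memoize_nullary() (compute_expensive_value is a
    # hypothetical callable used only for this example):
    #
    #   get_value = Util.memoize_nullary(lambda: compute_expensive_value())
    #   get_value()  # invokes compute_expensive_value() and caches the result
    #   get_value()  # returns the cached result without invoking it again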

    _find_unsafe = re.compile(
        r"[^\w@%+=:,./-]", re.ASCII if sys.version_info[0] >= 3 else 0
    ).search

    @staticmethod
    def shlex_quote(s):
        # Reimplement shlex.quote().
        if Util.python_has_version(3, 3):
            return shlex.quote(s)
        if not s:
            return "''"
        if Util._find_unsafe(s) is None:
            return s
        return "'" + s.replace("'", "'\"'\"'") + "'"

    @staticmethod
    def shlex_join(args):
        # Reimplement shlex.join().
        return " ".join(Util.shlex_quote(s) for s in args)

    @staticmethod
    def popen_wait(p, timeout=0):
        (res, b_stdout, b_stderr) = Util.popen_wait_read(
            p, timeout=timeout, read_std_pipes=False
        )
        return res

    @staticmethod
    def popen_wait_read(p, timeout=0, read_std_pipes=True):
        start = NM.utils_get_timestamp_msec()
        delay = 0.0005
        b_stdout = b""
        b_stderr = b""
        res = None
        while True:
            if read_std_pipes:
                b_stdout += Util.buffer_read(p.stdout)
                b_stderr += Util.buffer_read(p.stderr)
            if p.poll() is not None:
                res = p.returncode
                break
            if timeout == 0:
                break
            assert timeout > 0
            remaining = timeout - ((NM.utils_get_timestamp_msec() - start) / 1000.0)
            if remaining <= 0:
                break
            delay = min(delay * 2, remaining, 0.05)
            time.sleep(delay)
        return (res, b_stdout, b_stderr)
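
    # Polling starts at 0.5 msec and doubles each round, capped at 50 msec and
    # at the remaining timeout, so short-lived processes are reaped quickly
    # without busy-waiting on long-lived ones. Illustrative usage ("true" is
    # just a placeholder command for this example):
    #
    #   p = subprocess.Popen(
    #       ["true"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    #   )
    #   Util.buffer_set_nonblock(p.stdout)
    #   Util.buffer_set_nonblock(p.stderr)
    #   (res, b_stdout, b_stderr) = Util.popen_wait_read(p, timeout=5)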

    @staticmethod
    def buffer_read(buf):
        b = b""
        while True:
            try:
                b1 = buf.read()
            except io.BlockingIOError:
                b1 = b""
            except IOError:
                b1 = b""
            if not b1:
                return b
            b += b1

    @staticmethod
    def buffer_set_nonblock(buf):
        fd = buf.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

    @staticmethod
    def random_job(jobs):
        jobs = list(jobs)
        l = len(jobs)
        t = l * (l + 1) / 2
        while True:
            # We return a random job from the list, but the indexes at the front of
            # the list are more likely. The idea is that those jobs were started first
            # and are expected to complete first. As we poll, we want to check more
            # frequently on the elements at the beginning of the list...
            #
            # Let's assign probabilities with an arithmetic series.
            # That is, if there are 16 jobs, then the first gets weighted
            # with 16, the second with 15, then 14, and so on, until the
            # last has weight 1. That means, the first element is 16 times
            # more probable than the last.
            # The element at idx (starting with 0) is picked with probability
            #   1 / (l*(l+1)/2) * (l - idx)
            r = random.random() * t
            idx = 0
            rx = 0
            while True:
                rx += l - idx
                if rx >= r or idx == l - 1:
                    yield jobs[idx]
                    break
                idx += 1
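
    # Weighting example (numbers chosen for illustration only): with l == 4
    # jobs, the weights are 4, 3, 2, 1 and t == 10, so the probabilities of
    # picking index 0..3 are 0.4, 0.3, 0.2 and 0.1 respectively.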

    @staticmethod
    def iter_single(itr, min_num=1, max_num=1):
        itr = list(itr)
        n = 0
        v = None
        for c in itr:
            n += 1
            if n > 1:
                break
            v = c
        if n < min_num:
            raise AssertionError(
                "Expected at least %s elements, but %s found" % (min_num, n)
            )
        if n > max_num:
            raise AssertionError(
                "Expected at most %s elements, but %s found" % (max_num, n)
            )
        return v

    @staticmethod
    def file_read(filename):
        try:
            with open(filename, "rb") as f:
                return f.read()
        except:
            return None

    @staticmethod
    def _replace_text_match_join(split_arr, replacement):
        yield split_arr[0]
        for t in split_arr[1:]:
            yield (replacement,)
            yield t

    @staticmethod
    def ReplaceTextSimple(search, replacement):
        # This gives a function that can be used by Util.replace_text().
        # The function replaces an input bytes string @t. It must return either
        # a bytes string, or a list containing bytes strings and/or 1-tuples
        # (the latter containing one bytes string).
        # The 1-tuple acts as a placeholder for atomic text that cannot be
        # replaced a second time.
        #
        # Search for replace_text_fcn in Util.replace_text() where this is called.
        replacement = Util.as_bytes(replacement)

        if callable(search):
            search_fcn = search
        else:
            search_fcn = lambda: search

        def replace_fcn(t):
            assert isinstance(t, bytes)
            search_txt = search_fcn()
            if search_txt is None:
                return t
            search_txt = Util.as_bytes(search_txt)
            return Util._replace_text_match_join(t.split(search_txt), replacement)

        return replace_fcn

    @staticmethod
    def ReplaceTextRegex(pattern, replacement):
        # See ReplaceTextSimple.
        pattern = Util.as_bytes(pattern)
        replacement = Util.as_bytes(replacement)
        p = re.compile(pattern)
        return lambda t: Util._replace_text_match_join(p.split(t), replacement)
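
    # Illustrative usage of the replace machinery (the strings are example
    # values only):
    #
    #   fcn1 = Util.ReplaceTextSimple("eth0", "$DEV")
    #   fcn2 = Util.ReplaceTextRegex(r"127\.0\.0\.[0-9]+", "$ADDR")
    #   Util.replace_text("dev eth0, addr 127.0.0.5", [fcn1, fcn2])
    #       => "dev $DEV, addr $ADDR"
    #
    # Replaced spans become 1-tuples internally, so a replacement such as
    # "$DEV" cannot itself be matched and rewritten by a later function.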

    @staticmethod
    def replace_text(text, replace_arr):
        if not replace_arr:
            return text
        needs_encode = Util.python_has_version(3) and Util.is_string(text)
        if needs_encode:
            text = text.encode("utf-8")
        text = [text]
        for replace_text_fcn in replace_arr:
            text2 = []
            for t in text:
                # Tuples are markers for atomic strings. They won't be replaced a second
                # time.
                if not isinstance(t, tuple):
                    t = replace_text_fcn(t)
                if isinstance(t, bytes) or isinstance(t, tuple):
                    text2.append(t)
                else:
                    text2.extend(t)
            text = text2
        bb = b"".join([(t[0] if isinstance(t, tuple) else t) for t in text])
        if needs_encode:
            bb = bb.decode("utf-8")
        return bb

    @staticmethod
    def replace_text_sort_list(lst, replace_arr):
        lst = [(Util.replace_text(elem, replace_arr), elem) for elem in lst]
        lst = sorted(lst)
        lst = [tup[1] for tup in lst]
        return list(lst)

    @staticmethod
    def debug_dbus_interface():
        # This is for printf debugging, not used in actual code.
        os.system(
            "busctl --user --verbose call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects | cat"
        )

    @staticmethod
    def iter_nmcli_output_modes():
        for mode in [[], ["--mode", "tabular"], ["--mode", "multiline"]]:
            for fmt in [[], ["--pretty"], ["--terse"]]:
                for color in [[], ["--color", "yes"]]:
                    yield mode + fmt + color
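
    # For illustration, the first few argument lists yielded above are:
    #
    #   []
    #   ["--color", "yes"]
    #   ["--pretty"]
    #   ["--pretty", "--color", "yes"]
    #   ...
    #
    # that is, every combination of mode, format and color options, 18 in total.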

    @staticmethod
    def valgrind_check_log(valgrind_log, logname):
        if valgrind_log is None:
            return

        fd, name = valgrind_log

        os.close(fd)

        if not os.path.isfile(name):
            raise Exception("valgrind log %s unexpectedly does not exist" % (name,))

        if os.path.getsize(name) != 0:
            out = subprocess.run(
                [
                    "sed",
                    "-e",
                    "/^--[0-9]\+-- WARNING: unhandled .* syscall: /,/^--[0-9]\+-- it at http.*\.$/d",
                    name,
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            if out.returncode != 0:
                raise Exception('Calling "sed" to search valgrind log failed')
            if out.stdout:
                print("valgrind log %s for %s is not empty:" % (name, logname))
                print("\n%s\n" % (out.stdout.decode("utf-8", errors="replace"),))
                raise Exception("valgrind log %s unexpectedly is not empty" % (name,))

        os.remove(name)

    @staticmethod
    def pexpect_expect_all(pexp, *pattern_list):
        # This will call "pexpect.expect()" on pattern_list,
        # expecting all entries to match exactly once, in any
        # order.
        pattern_list = list(pattern_list)
        while pattern_list:
            idx = pexp.expect(pattern_list)
            del pattern_list[idx]
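
    # Illustrative usage (the prompts are example patterns only):
    #
    #   Util.pexpect_expect_all(pexp, "Username:", "Password:")
    #
    # succeeds regardless of which prompt appears first; a missing prompt
    # makes pexpect raise its usual timeout error.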


###############################################################################


class Configuration:
    def __init__(self):
        self._values = {}

    def get(self, name):
        v = self._values.get(name, None)
        if name in self._values:
            return v
        if name == ENV_NM_TEST_CLIENT_BUILDDIR:
            v = os.environ.get(
                ENV_NM_TEST_CLIENT_BUILDDIR, PathConfiguration.top_srcdir()
            )
            if not os.path.isdir(v):
                raise Exception("Missing builddir. Set NM_TEST_CLIENT_BUILDDIR?")
        elif name == ENV_NM_TEST_CLIENT_NMCLI_PATH:
            v = os.environ.get(ENV_NM_TEST_CLIENT_NMCLI_PATH, None)
            if v is None:
                try:
                    v = os.path.abspath(
                        self.get(ENV_NM_TEST_CLIENT_BUILDDIR) + "/src/nmcli/nmcli"
                    )
                except:
                    pass
            if not os.path.exists(v):
                raise Exception("Missing nmcli binary. Set NM_TEST_CLIENT_NMCLI_PATH?")
        elif name == ENV_NM_TEST_CLIENT_CLOUD_SETUP_PATH:
            v = os.environ.get(ENV_NM_TEST_CLIENT_CLOUD_SETUP_PATH, None)
            if v is None:
                try:
                    v = os.path.abspath(
                        self.get(ENV_NM_TEST_CLIENT_BUILDDIR)
                        + "/src/nm-cloud-setup/nm-cloud-setup"
                    )
                except:
                    pass
            if not os.path.exists(v):
                raise Exception(
                    "Missing nm-cloud-setup binary. Set NM_TEST_CLIENT_CLOUD_SETUP_PATH?"
                )
        elif name == ENV_NM_TEST_CLIENT_CHECK_L10N:
            # If we test locales other than 'C', the output of nmcli depends on
            # whether nmcli can load the translations. Unfortunately, I cannot
            # find a way to make gettext use the po/*.gmo files from the
            # build-dir.
            #
            # Hence, such tests only work if you also issue `make install`.
            #
            # Only by setting NM_TEST_CLIENT_CHECK_L10N=1 are these tests
            # included as well.
            v = Util.is_bool(os.environ.get(ENV_NM_TEST_CLIENT_CHECK_L10N, None))
        elif name == ENV_NM_TEST_REGENERATE:
            # In the "regenerate" mode, the tests will rewrite the files on disk
            # against which we assert. That is useful if there are intentional
            # changes and we want to regenerate the expected output.
            v = Util.is_bool(os.environ.get(ENV_NM_TEST_REGENERATE, None))
        elif name == ENV_NM_TEST_WITH_LINENO:
            v = Util.is_bool(os.environ.get(ENV_NM_TEST_WITH_LINENO, None))
        elif name == ENV_NM_TEST_VALGRIND:
            if self.get(ENV_NM_TEST_REGENERATE):
                v = False
            else:
                v = os.environ.get(ENV_NM_TEST_VALGRIND, None)
                if v:
                    v = Util.is_bool(v)
                else:
                    v = Util.is_bool(os.environ.get("NMTST_USE_VALGRIND", None))
        elif name in [
            ENV_NM_TEST_ASAN_OPTIONS,
            ENV_NM_TEST_LSAN_OPTIONS,
            ENV_NM_TEST_UBSAN_OPTIONS,
        ]:
            v = os.environ.get(name, None)
            if v is None:
                if name == ENV_NM_TEST_ASAN_OPTIONS:
                    v = "detect_leaks=1"
                    # v += ' fast_unwind_on_malloc=false'
                elif name == ENV_NM_TEST_LSAN_OPTIONS:
                    v = ""
                elif name == ENV_NM_TEST_UBSAN_OPTIONS:
                    v = "print_stacktrace=1:halt_on_error=1"
                else:
                    assert False
        elif name == ENV_LIBTOOL:
            v = os.environ.get(name, None)
            if v is None:
                v = os.path.abspath(
                    os.path.dirname(self.get(ENV_NM_TEST_CLIENT_NMCLI_PATH))
                    + "/../../libtool"
                )
                if not os.path.isfile(v):
                    v = None
                else:
                    v = [v]
            elif not v:
                v = None
            else:
                v = shlex.split(v)
        else:
            raise Exception()
        self._values[name] = v
        return v


conf = Configuration()


###############################################################################


class NMStubServer:
    @staticmethod
    def _conn_get_main_object(conn):
        try:
            return conn.get_object(
                "org.freedesktop.NetworkManager", "/org/freedesktop/NetworkManager"
            )
        except:
            return None

    def __init__(self, seed):
        service_path = PathConfiguration.test_networkmanager_service_path()
        self._conn = dbus.SessionBus()

        env = os.environ.copy()
        env["NM_TEST_NETWORKMANAGER_SERVICE_SEED"] = seed
        p = subprocess.Popen(
            [sys.executable, service_path], stdin=subprocess.PIPE, env=env
        )

        start = NM.utils_get_timestamp_msec()
        while True:
            if p.poll() is not None:
                p.stdin.close()
                if p.returncode == 77:
                    raise unittest.SkipTest(
                        "the stub service %s exited with status 77" % (service_path)
                    )
                raise Exception(
                    "the stub service %s exited unexpectedly" % (service_path)
                )
            nmobj = self._conn_get_main_object(self._conn)
            if nmobj is not None:
                break
            if (NM.utils_get_timestamp_msec() - start) >= 4000:
                p.stdin.close()
                p.kill()
                Util.popen_wait(p, 1)
                raise Exception(
                    "after starting stub service the D-Bus name was not claimed in time"
                )

        self._nmobj = nmobj
        self._nmiface = dbus.Interface(
            nmobj, "org.freedesktop.NetworkManager.LibnmGlibTest"
        )
        self._p = p

    def shutdown(self, kill_mode="random"):
        conn = self._conn
        p = self._p
        self._nmobj = None
        self._nmiface = None
        self._conn = None
        self._p = None

        # The test stub service watches stdin and will do a proper
        # shutdown when it closes. That means it will send signals
        # about going away.
        # On the other hand, just killing it will cause the process
        # to abruptly drop off the bus.
        if kill_mode == "kill":
            p.kill()
        elif kill_mode == "stdin-close":
            p.stdin.close()
        else:
            assert kill_mode == "random"
            ops = [p.stdin.close, p.kill]
            random.shuffle(ops)
            ops[0]()
            r = random.random()
            if r < 0.75:
                if r < 0.5:
                    time.sleep(r * 0.2)
                ops[1]()

        if Util.popen_wait(p, 1) is None:
            raise Exception("Stub service did not exit in time")
        p.stdin.close()
        if self._conn_get_main_object(conn) is not None:
            raise Exception(
                "Stub service is still here although it should have shut down"
            )

    class _MethodProxy:
        def __init__(self, parent, method_name):
            self._parent = parent
            self._method_name = method_name

        def __call__(self, *args, **kwargs):
            dbus_iface = kwargs.pop("dbus_iface", None)
            if dbus_iface is None:
                dbus_iface = self._parent._nmiface
            method = dbus_iface.get_dbus_method(self._method_name)
            if kwargs:
                # For convenience, we allow the caller to specify arguments
                # as kwargs. In this case, we append an a{sv} dict as the
                # last argument.
                args = list(args)
                args.append(kwargs)
            return method(*args)

    def __getattr__(self, member):
        if not member.startswith("op_"):
            raise AttributeError(member)
        return self._MethodProxy(self, member[3:])
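
    # Thus any "srv.op_<Name>(...)" attribute access is forwarded as the D-Bus
    # method <Name> on the stub service's test interface. For example:
    #
    #   srv.op_AddConnection(con, True)      # calls "AddConnection"
    #   srv.op_FindConnections(con_id="x")   # calls "FindConnections" with
    #                                        # {"con_id": "x"} appended as the
    #                                        # trailing a{sv} argument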

    def addConnection(self, connection, do_verify_strict=True):
        return self.op_AddConnection(connection, do_verify_strict)

    def findConnections(self, **kwargs):
        if kwargs:
            lst = self.op_FindConnections(**kwargs)
        else:
            lst = self.op_FindConnections({})
        return list([(str(elem[0]), str(elem[1]), str(elem[2])) for elem in lst])

    def findConnectionUuid(self, con_id, required=True):
        try:
            u = Util.iter_single(self.findConnections(con_id=con_id))[1]
            assert u, "Invalid uuid %s" % (u)
        except Exception as e:
            if not required:
                return None
            raise AssertionError(
                "Unexpectedly did not find connection %s: %s" % (con_id, str(e))
            )
        return u

    def setProperty(self, path, propname, value, iface_name=None):
        if iface_name is None:
            iface_name = ""
        self.op_SetProperties([(path, [(iface_name, [(propname, value)])])])
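
    # The SetProperties argument is a list of (object_path, interface_list)
    # tuples, each interface_list entry being (interface_name, property_list)
    # and each property_list entry being (property_name, value). For example
    # (path and values are illustrative only):
    #
    #   srv.setProperty("/org/freedesktop/NetworkManager", "Version", "1.41")
    #
    # expands to op_SetProperties([(path, [("", [("Version", "1.41")])])]).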

    def addAndActivateConnection(
        self, connection, device, specific_object="", delay=None
    ):
        if delay is not None:
            self.op_SetActiveConnectionStateChangedDelay(device, delay)
        nm_iface = self._conn_get_main_object(self._conn)
        self.op_AddAndActivateConnection(
            connection, device, specific_object, dbus_iface=nm_iface
        )


###############################################################################


class AsyncProcess:
    def __init__(self, args, env, complete_cb, max_waittime_msec=20000):
        self._args = list(args)
        self._env = env
        self._complete_cb = complete_cb
        self._max_waittime_msec = max_waittime_msec

    def start(self):
        if not hasattr(self, "_p"):
self._p_start_timestamp = NM.utils_get_timestamp_msec()
|
2020-06-09 16:28:32 -04:00
|
|
|
self._p_stdout_buf = b""
|
|
|
|
|
self._p_stderr_buf = b""
|
|
|
|
|
self._p = subprocess.Popen(
|
|
|
|
|
self._args,
|
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
|
env=self._env,
|
|
|
|
|
)
|
2019-11-06 15:30:55 +01:00
|
|
|
Util.buffer_set_nonblock(self._p.stdout)
|
|
|
|
|
Util.buffer_set_nonblock(self._p.stderr)
|

    def _timeout_remaining_time(self):
        # note that we call this during poll() and wait_and_complete().
        # we don't know the exact time when the process terminated,
        # so this is only approximately correct, if we call poll/wait
        # frequently.
        # Worst case, we will think that the process did not time out,
        # when in fact it was running longer than max-waittime.
        return self._max_waittime_msec - (
            NM.utils_get_timestamp_msec() - self._p_start_timestamp
        )

    def poll(self, timeout=0):
        self.start()

        (return_code, b_stdout, b_stderr) = Util.popen_wait_read(self._p, timeout)

        self._p_stdout_buf += b_stdout
        self._p_stderr_buf += b_stderr

        if return_code is None and self._timeout_remaining_time() <= 0:
            raise Exception(
                "process is still running after timeout: %s" % (" ".join(self._args))
            )
        return return_code

    def wait_and_complete(self):
        self.start()

        p = self._p
        self._p = None

        (return_code, b_stdout, b_stderr) = Util.popen_wait_read(
            p, max(0, self._timeout_remaining_time()) / 1000
        )
        (stdout, stderr) = (p.stdout.read(), p.stderr.read())
        p.stdout.close()
        p.stderr.close()

        stdout = self._p_stdout_buf + b_stdout + stdout
        stderr = self._p_stderr_buf + b_stderr + stderr
        del self._p_stdout_buf
        del self._p_stderr_buf

        if return_code is None:
            print(stdout)
            print(stderr)
            raise Exception(
                "process did not complete in time: %s" % (" ".join(self._args))
            )

        self._complete_cb(self, return_code, stdout, stderr)


###############################################################################


MAX_JOBS = 15


class TestNmClient(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self._calling_num = {}
        self._skip_test_for_l10n_diff = []
        self._async_jobs = []
        self._results = []
        self.srv = None
        unittest.TestCase.__init__(self, *args, **kwargs)

    def srv_start(self):
        self.srv_shutdown()
        self.srv = NMStubServer(self._testMethodName)

    def srv_shutdown(self):
        if self.srv is not None:
            srv = self.srv
            self.srv = None
            srv.shutdown()

    def ReplaceTextConUuid(self, con_name, replacement):
        return Util.ReplaceTextSimple(
            Util.memoize_nullary(lambda: self.srv.findConnectionUuid(con_name)),
            replacement,
        )
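
    # The .expected files are a concatenation of records. Each record starts
    # with a "size: <n>\n" header, where <n> is the number of bytes that
    # follow the header line. _read_expected() splits the file content back
    # into these records.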
    @staticmethod
    def _read_expected(filename):
        results_expect = []
        content_expect = Util.file_read(filename)
        try:
            base_idx = 0
            size_prefix = "size: ".encode("utf8")
            while True:
                if not content_expect[base_idx : base_idx + 10].startswith(size_prefix):
                    raise Exception("Unexpected token")
                j = base_idx + len(size_prefix)
                i = j
                if Util.python_has_version(3, 0):
                    eol = ord("\n")
                else:
                    eol = "\n"
                while content_expect[i] != eol:
                    i += 1
                i = i + 1 + int(content_expect[j:i])
                results_expect.append(content_expect[base_idx:i])
                if len(content_expect) == i:
                    break
                base_idx = i
        except Exception as e:
            results_expect = None

        return content_expect, results_expect
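
    # _env() constructs the environment for the spawned nmcli process from
    # scratch; only a few selected variables are copied over from the caller's
    # environment, so the test output does not depend on the outer shell.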
    def _env(
        self, lang="C", calling_num=None, fatal_warnings=_DEFAULT_ARG, extra_env=None
    ):
        if lang == "C":
            language = ""
        elif lang == "de_DE.utf8":
            language = "de"
        elif lang == "pl_PL.UTF-8":
            language = "pl"
        else:
            self.fail("invalid language %s" % (lang))

        env = {}
        for k in [
            "LD_LIBRARY_PATH",
            "DBUS_SESSION_BUS_ADDRESS",
            "LIBNM_CLIENT_DEBUG",
            "LIBNM_CLIENT_DEBUG_FILE",
        ]:
            val = os.environ.get(k, None)
            if val is not None:
                env[k] = val
        env["LANG"] = lang
        env["LANGUAGE"] = language
        env["LIBNM_USE_SESSION_BUS"] = "1"
        env["LIBNM_USE_NO_UDEV"] = "1"
        env["TERM"] = "linux"
        env["ASAN_OPTIONS"] = conf.get(ENV_NM_TEST_ASAN_OPTIONS)
        env["LSAN_OPTIONS"] = conf.get(ENV_NM_TEST_LSAN_OPTIONS)
        env["UBSAN_OPTIONS"] = conf.get(ENV_NM_TEST_UBSAN_OPTIONS)
        env["XDG_CONFIG_HOME"] = PathConfiguration.srcdir()
        if calling_num is not None:
            env["NM_TEST_CALLING_NUM"] = str(calling_num)
        if fatal_warnings is _DEFAULT_ARG or fatal_warnings:
            env["G_DEBUG"] = "fatal-warnings"
        if extra_env is not None:
            for k, v in extra_env.items():
                env[k] = v
        return env

    def cmd_construct_argv(self, cmd_path, args, with_valgrind=None):
        if with_valgrind is None:
            with_valgrind = conf.get(ENV_NM_TEST_VALGRIND)

        valgrind_log = None
        cmd = conf.get(cmd_path)
        if with_valgrind:
            valgrind_log = tempfile.mkstemp(prefix="nm-test-client-valgrind.")
            argv = [
                "valgrind",
                "--quiet",
                "--error-exitcode=37",
                "--leak-check=full",
                "--gen-suppressions=all",
                (
                    "--suppressions="
                    + PathConfiguration.top_srcdir()
                    + "/valgrind.suppressions"
                ),
                "--num-callers=100",
                "--log-file=" + valgrind_log[1],
                cmd,
            ]
            libtool = conf.get(ENV_LIBTOOL)
            if libtool:
                argv = list(libtool) + ["--mode=execute"] + argv
        else:
            argv = [cmd]

        argv.extend(args)
        return argv, valgrind_log

    def call_pexpect(self, cmd_path, args, extra_env):
        argv, valgrind_log = self.cmd_construct_argv(cmd_path, args)
        env = self._env(extra_env=extra_env)

        pexp = pexpect.spawn(argv[0], argv[1:], timeout=10, env=env)

        typ = collections.namedtuple("CallPexpect", ["pexp", "valgrind_log"])
        return typ(pexp, valgrind_log)
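
    # Scheduling strategy (see also async_wait()): start up to MAX_JOBS
    # processes, then poll each running job in turn for a short time instead
    # of blocking on the first job in the queue. Whichever process exits first
    # is completed and another job is started. This parallelizes better when
    # job runtimes vary strongly, for example when running nmcli under
    # valgrind. Getting notified via a GMainContext child watch would avoid
    # the polling, but is more complicated, so the naive polling approach is
    # kept.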
    def async_start(self, wait_all=False):
        while True:
            while True:
                # start up to MAX_JOBS jobs, but poll() and complete those
                # that already exited. Retry until there are no more jobs
                # to start, or until MAX_JOBS are running.
                for async_job in list(self._async_jobs[0:MAX_JOBS]):
                    async_job.start()
                jobs_running = []
                for async_job in list(self._async_jobs[0:MAX_JOBS]):
                    if async_job.poll() is not None:
                        self._async_jobs.remove(async_job)
                        async_job.wait_and_complete()
                        continue
                    jobs_running.append(async_job)
                if len(jobs_running) >= len(self._async_jobs):
                    break
                if len(jobs_running) >= MAX_JOBS:
                    break

            if not jobs_running:
                return
            if not wait_all:
                return

            # in a loop, indefinitely poll the running jobs until we find one
            # that completes. Note that poll() itself will raise an exception
            # if a job times out.
            for async_job in Util.random_job(jobs_running):
                if async_job.poll(timeout=0.03) is not None:
                    self._async_jobs.remove(async_job)
                    async_job.wait_and_complete()
                    break

    def async_wait(self):
        return self.async_start(wait_all=True)

    def _nm_test_post(self):
        self.async_wait()

        self.srv_shutdown()

        self._calling_num = None

        results = self._results
        self._results = None

        if len(results) == 0:
            return

        skip_test_for_l10n_diff = self._skip_test_for_l10n_diff
        self._skip_test_for_l10n_diff = None

        test_name = self._testMethodName

        filename = os.path.abspath(
            PathConfiguration.srcdir()
            + "/test-client.check-on-disk/"
            + test_name
            + ".expected"
        )

        regenerate = conf.get(ENV_NM_TEST_REGENERATE)

        content_expect, results_expect = self._read_expected(filename)

        if results_expect is None:
            if not regenerate:
                self.fail(
                    "Failed to parse expected file '%s'. Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
                    % (filename)
                )
        else:
            for i in range(0, min(len(results_expect), len(results))):
                n = results[i]
                if results_expect[i] == n["content"]:
                    continue
                if regenerate:
                    continue
                if n["ignore_l10n_diff"]:
                    skip_test_for_l10n_diff.append(n["test_name"])
                    continue
                print(
                    "\n\n\nThe file '%s' does not have the expected content:"
                    % (filename)
                )
                print("ACTUAL OUTPUT:\n[[%s]]\n" % (n["content"]))
                print("EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[i]))
                print(
                    "Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
                )
                print(
                    "See howto in %s for details.\n"
                    % (PathConfiguration.canonical_script_filename())
                )
                sys.stdout.flush()
                self.fail(
                    "Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
                    % (filename)
                )
            if len(results_expect) != len(results):
                if not regenerate:
                    print(
                        "\n\n\nThe number of tests in %s does not match the expected content (%s vs %s):"
                        % (filename, len(results_expect), len(results))
                    )
                    if len(results_expect) < len(results):
                        print(
                            "ACTUAL OUTPUT:\n[[%s]]\n"
                            % (results[len(results_expect)]["content"])
                        )
                    else:
                        print(
                            "EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[len(results)])
                        )
                    print(
                        "Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
                    )
                    print(
                        "See howto in %s for details.\n"
                        % (PathConfiguration.canonical_script_filename())
                    )
                    sys.stdout.flush()
                    self.fail(
                        "Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
                        % (filename)
                    )

        if regenerate:
            content_new = b"".join([r["content"] for r in results])
            if content_new != content_expect:
                try:
                    with open(filename, "wb") as content_file:
                        content_file.write(content_new)
                except Exception as e:
                    self.fail("Failure to write '%s': %s" % (filename, e))

        if skip_test_for_l10n_diff:
            # nmcli loads translations from the installation path. This failure
            # commonly happens because you did not install the binary in the
            # --prefix before running the test. Hence, translations are not
            # available or differ.
            self.skipTest(
                "Skipped asserting for localized tests %s. Set NM_TEST_CLIENT_CHECK_L10N=1 to force fail."
                % (",".join(skip_test_for_l10n_diff))
            )

    def setUp(self):
        if not dbus_session_inited:
            self.skipTest(
                "Own D-Bus session for testing is not initialized. Do you have dbus-run-session available?"
            )
        if NM is None:
            self.skipTest("gi.NM is not available. Did you build with introspection?")


class TestNmcli(TestNmClient):
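    # call_nmcli_l() runs one nmcli invocation once per locale ("C" and
    # Polish), so that the translated output is exercised and recorded, too.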
    def call_nmcli_l(
        self,
        args,
        check_on_disk=_DEFAULT_ARG,
        fatal_warnings=_DEFAULT_ARG,
        expected_returncode=_DEFAULT_ARG,
        expected_stdout=_DEFAULT_ARG,
        expected_stderr=_DEFAULT_ARG,
        replace_stdout=None,
        replace_stderr=None,
        replace_cmd=None,
        sort_lines_stdout=False,
        extra_env=None,
        sync_barrier=False,
    ):
        frame = sys._getframe(1)
        for lang in ["C", "pl"]:
            self._call_nmcli(
                args,
                lang,
                check_on_disk,
                fatal_warnings,
                expected_returncode,
                expected_stdout,
                expected_stderr,
                replace_stdout,
                replace_stderr,
                replace_cmd,
                sort_lines_stdout,
                extra_env,
                sync_barrier,
                frame,
            )

    def call_nmcli(
        self,
        args,
        langs=None,
        lang=None,
        check_on_disk=_DEFAULT_ARG,
        fatal_warnings=_DEFAULT_ARG,
        expected_returncode=_DEFAULT_ARG,
        expected_stdout=_DEFAULT_ARG,
        expected_stderr=_DEFAULT_ARG,
        replace_stdout=None,
        replace_stderr=None,
        replace_cmd=None,
        sort_lines_stdout=False,
        extra_env=None,
        sync_barrier=None,
    ):
        frame = sys._getframe(1)

        if langs is not None:
            assert lang is None
        else:
            if lang is None:
                lang = "C"
            langs = [lang]

        if sync_barrier is None:
            sync_barrier = len(langs) == 1

        for lang in langs:
            self._call_nmcli(
                args,
                lang,
                check_on_disk,
                fatal_warnings,
                expected_returncode,
                expected_stdout,
                expected_stderr,
                replace_stdout,
                replace_stderr,
                replace_cmd,
                sort_lines_stdout,
                extra_env,
                sync_barrier,
                frame,
            )
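
    # Interactive tests drive nmcli through pexpect rather than comparing the
    # captured output in one go. NO_COLOR=1 is passed so that ANSI color
    # sequences don't end up in the matched output.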
    def call_nmcli_pexpect(self, args):
        return self.call_pexpect(ENV_NM_TEST_CLIENT_NMCLI_PATH, args, {"NO_COLOR": "1"})

    def _call_nmcli(
        self,
        args,
        lang,
        check_on_disk,
        fatal_warnings,
        expected_returncode,
        expected_stdout,
        expected_stderr,
        replace_stdout,
        replace_stderr,
        replace_cmd,
        sort_lines_stdout,
        extra_env,
        sync_barrier,
        frame,
    ):
        if sync_barrier:
            self.async_wait()

        calling_fcn = frame.f_code.co_name
        calling_num = self._calling_num.get(calling_fcn, 0) + 1
        self._calling_num[calling_fcn] = calling_num

        test_name = "%s-%03d" % (calling_fcn, calling_num)

        # we cannot use frame.f_code.co_filename directly, because it might
        # differ depending on where the file lies and what the CWD is. We
        # still want to give the location of the file, so that the user can
        # find the source more easily (when looking at the .expected files).
        self.assertTrue(
            os.path.abspath(frame.f_code.co_filename).endswith(
                "/" + PathConfiguration.canonical_script_filename()
            )
        )

        if conf.get(ENV_NM_TEST_WITH_LINENO):
            calling_location = "%s:%d:%s()/%d" % (
                PathConfiguration.canonical_script_filename(),
                frame.f_lineno,
                frame.f_code.co_name,
                calling_num,
            )
        else:
            calling_location = "%s:%s()/%d" % (
                PathConfiguration.canonical_script_filename(),
                frame.f_code.co_name,
                calling_num,
            )

        if lang is None or lang == "C":
            lang = "C"
        elif lang == "de":
            lang = "de_DE.utf8"
        elif lang == "pl":
            lang = "pl_PL.UTF-8"
        else:
            self.fail("invalid language %s" % (lang))

        # Running under valgrind is not yet supported for these tests.
        args, valgrind_log = self.cmd_construct_argv(
            ENV_NM_TEST_CLIENT_NMCLI_PATH, args, with_valgrind=False
        )

        assert valgrind_log is None

        if replace_stdout is not None:
            replace_stdout = list(replace_stdout)
        if replace_stderr is not None:
            replace_stderr = list(replace_stderr)
        if replace_cmd is not None:
            replace_cmd = list(replace_cmd)

        if check_on_disk is _DEFAULT_ARG:
            check_on_disk = (
                expected_returncode is _DEFAULT_ARG
                and (
                    expected_stdout is _DEFAULT_ARG
                    or expected_stdout is _UNSTABLE_OUTPUT
                )
                and (
                    expected_stderr is _DEFAULT_ARG
                    or expected_stderr is _UNSTABLE_OUTPUT
                )
            )
        if expected_returncode is _DEFAULT_ARG:
            expected_returncode = None
        if expected_stdout is _DEFAULT_ARG:
            expected_stdout = None
        if expected_stderr is _DEFAULT_ARG:
            expected_stderr = None
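
        # Reserve a result slot now so that self._results keeps the order in
        # which the jobs were scheduled; complete_cb() below fills the slot in
        # once the nmcli process exits (possibly out of order).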
        results_idx = len(self._results)
        self._results.append(None)

        def complete_cb(async_job, returncode, stdout, stderr):
            if expected_stdout is _UNSTABLE_OUTPUT:
                stdout = "<UNSTABLE OUTPUT>".encode("utf-8")
            else:
                stdout = Util.replace_text(stdout, replace_stdout)

            if expected_stderr is _UNSTABLE_OUTPUT:
                stderr = "<UNSTABLE OUTPUT>".encode("utf-8")
            else:
                stderr = Util.replace_text(stderr, replace_stderr)

            if sort_lines_stdout:
                stdout = b"\n".join(sorted(stdout.split(b"\n")))

            ignore_l10n_diff = lang != "C" and not conf.get(
                ENV_NM_TEST_CLIENT_CHECK_L10N
            )

            if expected_stderr is not None and expected_stderr is not _UNSTABLE_OUTPUT:
                if expected_stderr != stderr:
                    if ignore_l10n_diff:
                        self._skip_test_for_l10n_diff.append(test_name)
                    else:
                        self.assertEqual(expected_stderr, stderr)
            if expected_stdout is not None and expected_stdout is not _UNSTABLE_OUTPUT:
                if expected_stdout != stdout:
                    if ignore_l10n_diff:
                        self._skip_test_for_l10n_diff.append(test_name)
                    else:
                        self.assertEqual(expected_stdout, stdout)
            if expected_returncode is not None:
                self.assertEqual(expected_returncode, returncode)

            if fatal_warnings is _DEFAULT_ARG:
                if expected_returncode != -5:
                    self.assertNotEqual(returncode, -5)
            elif fatal_warnings:
                if expected_returncode is None:
                    self.assertEqual(returncode, -5)

            if check_on_disk:
                cmd = "$NMCLI %s" % (Util.shlex_join(args[1:]),)
                cmd = Util.replace_text(cmd, replace_cmd)

                if returncode < 0:
                    returncode_str = "%d (SIGNAL %s)" % (
                        returncode,
                        Util.signal_no_to_str(-returncode),
                    )
                else:
                    returncode_str = "%d" % (returncode)

                content = (
                    ("location: %s\n" % (calling_location)).encode("utf8")
                    + ("cmd: %s\n" % (cmd)).encode("utf8")
                    + ("lang: %s\n" % (lang)).encode("utf8")
                    + ("returncode: %s\n" % (returncode_str)).encode("utf8")
                )
                if len(stdout) > 0:
                    content += (
                        ("stdout: %d bytes\n>>>\n" % (len(stdout))).encode("utf8")
                        + stdout
                        + "\n<<<\n".encode("utf8")
                    )
                if len(stderr) > 0:
                    content += (
                        ("stderr: %d bytes\n>>>\n" % (len(stderr))).encode("utf8")
                        + stderr
                        + "\n<<<\n".encode("utf8")
                    )
                content = ("size: %s\n" % (len(content))).encode("utf8") + content

                self._results[results_idx] = {
                    "test_name": test_name,
                    "ignore_l10n_diff": ignore_l10n_diff,
                    "content": content,
                }

        env = self._env(lang, calling_num, fatal_warnings, extra_env)
        async_job = AsyncProcess(args=args, env=env, complete_cb=complete_cb)

        self._async_jobs.append(async_job)

        self.async_start(wait_all=sync_barrier)
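
    # Decorators for the test methods: @nm_test runs the test against a
    # freshly started stub service, @nm_test_no_dbus runs it without one.
    # Both perform the output checks of _nm_test_post() afterwards.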
    def nm_test(func):
        def f(self):
            self.srv_start()
            func(self)
            self._nm_test_post()

        return f

    def nm_test_no_dbus(func):
        def f(self):
            func(self)
            self._nm_test_post()

        return f

    def skip_without_pexpect(func):
        def f(self):
            if pexpect is None:
                raise unittest.SkipTest("pexpect not available")
            func(self)

        return f
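
    # init_001() populates the stub service with a common setup of devices,
    # access points and a connection profile that several tests share.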
    def init_001(self):
        self.srv.op_AddObj("WiredDevice", iface="eth0")
        self.srv.op_AddObj("WiredDevice", iface="eth1")
        self.srv.op_AddObj("WifiDevice", iface="wlan0")
        self.srv.op_AddObj("WifiDevice", iface="wlan1")

        # add another device with an identical ifname. The D-Bus API itself
        # does not enforce that ifnames are unique.
        self.srv.op_AddObj("WifiDevice", ident="wlan1/x", iface="wlan1")

        self.srv.op_AddObj("WifiAp", device="wlan0", rsnf=0x0)

        self.srv.op_AddObj("WifiAp", device="wlan0")

        NM_AP_FLAGS = getattr(NM, "80211ApSecurityFlags")
        rsnf = 0x0
        rsnf = rsnf | NM_AP_FLAGS.PAIR_TKIP
        rsnf = rsnf | NM_AP_FLAGS.PAIR_CCMP
        rsnf = rsnf | NM_AP_FLAGS.GROUP_TKIP
        rsnf = rsnf | NM_AP_FLAGS.GROUP_CCMP
        rsnf = rsnf | NM_AP_FLAGS.KEY_MGMT_SAE
        self.srv.op_AddObj("WifiAp", device="wlan0", wpaf=0x0, rsnf=rsnf)

        self.srv.op_AddObj("WifiAp", device="wlan1")

        self.srv.addConnection(
            {"connection": {"type": "802-3-ethernet", "id": "con-1"}}
        )

    @nm_test
    def test_001(self):
        self.call_nmcli_l([])

        self.call_nmcli_l(
            ["-f", "AP", "-mode", "multiline", "-p", "d", "show", "wlan0"]
        )

        self.call_nmcli_l(["c", "s"])

        self.call_nmcli_l(["bogus", "s"])

        for mode in Util.iter_nmcli_output_modes():
            self.call_nmcli_l(mode + ["general", "permissions"])

    @nm_test
    def test_002(self):
        self.init_001()

        self.call_nmcli_l(["d"])

        self.call_nmcli_l(["-f", "all", "d"])

        self.call_nmcli_l([])

        self.call_nmcli_l(["-f", "AP", "-mode", "multiline", "d", "show", "wlan0"])
        self.call_nmcli_l(
            ["-f", "AP", "-mode", "multiline", "-p", "d", "show", "wlan0"]
        )
        self.call_nmcli_l(
            ["-f", "AP", "-mode", "multiline", "-t", "d", "show", "wlan0"]
        )
        self.call_nmcli_l(["-f", "AP", "-mode", "tabular", "d", "show", "wlan0"])
        self.call_nmcli_l(["-f", "AP", "-mode", "tabular", "-p", "d", "show", "wlan0"])
        self.call_nmcli_l(["-f", "AP", "-mode", "tabular", "-t", "d", "show", "wlan0"])

        self.call_nmcli_l(["-f", "ALL", "d", "wifi"])

        self.call_nmcli_l(["c"])

        self.call_nmcli_l(["c", "s", "con-1"])

    @nm_test
    def test_003(self):
        con_gsm_list = [
            ("con-gsm1", "xyz.con-gsm1"),
            ("con-gsm2", ""),
            ("con-gsm3", " "),
        ]

        self.init_001()

        replace_uuids = []

        replace_uuids.append(
            self.ReplaceTextConUuid("con-xx1", "UUID-con-xx1-REPLACED-REPLACED-REPLA")
        )

        self.call_nmcli(
            ["c", "add", "type", "ethernet", "ifname", "*", "con-name", "con-xx1"],
            replace_stdout=replace_uuids,
        )

        self.call_nmcli_l(["c", "s"], replace_stdout=replace_uuids)

        for con_name, apn in con_gsm_list:
            replace_uuids.append(
                self.ReplaceTextConUuid(
                    con_name, "UUID-" + con_name + "-REPLACED-REPLACED-REPL"
                )
            )

            self.call_nmcli(
                [
                    "connection",
                    "add",
                    "type",
                    "gsm",
                    "autoconnect",
                    "no",
                    "con-name",
                    con_name,
                    "ifname",
                    "*",
                    "apn",
                    apn,
                    "serial.baud",
                    "5",
                    "serial.send-delay",
                    "100",
                    "serial.pari",
                    "1",
                    "ipv4.dns-options",
                    " ",
                ],
                replace_stdout=replace_uuids,
            )

        replace_uuids.append(
            self.ReplaceTextConUuid("ethernet", "UUID-ethernet-REPLACED-REPLACED-REPL")
        )

        self.call_nmcli(
            ["c", "add", "type", "ethernet", "ifname", "*"],
            replace_stdout=replace_uuids,
        )

        self.call_nmcli_l(["c", "s"], replace_stdout=replace_uuids)

        self.call_nmcli_l(["-f", "ALL", "c", "s"], replace_stdout=replace_uuids)

        self.call_nmcli_l(
            ["--complete-args", "-f", "ALL", "c", "s", ""],
            replace_stdout=replace_uuids,
            sort_lines_stdout=True,
        )

        for con_name, apn in con_gsm_list:
            self.call_nmcli_l(["con", "s", con_name], replace_stdout=replace_uuids)
            self.call_nmcli_l(
                ["-g", "all", "con", "s", con_name], replace_stdout=replace_uuids
            )

        # activate the same profile on multiple devices. Our stub implementation
        # is fine with that... although the NetworkManager service would reject
        # such a configuration by deactivating the profile first. But note that
        # that is only an internal behavior of the NetworkManager service. The
        # D-Bus API perfectly allows for one profile to be active multiple
        # times. Also note that there is always a short time where one profile
        # goes down while another is activating. Hence, while real
        # NetworkManager commonly does not allow multiple profiles to *stay*
        # connected at the same time, there is always the possibility that a
        # profile is activating/active on a device, while also
        # activating/deactivating in parallel.
        for dev in ["eth0", "eth1"]:
            self.call_nmcli(["con", "up", "ethernet", "ifname", dev])

        self.call_nmcli_l(["con"], replace_stdout=replace_uuids)

        self.call_nmcli_l(["-f", "ALL", "con"], replace_stdout=replace_uuids)

        self.call_nmcli_l(
            ["-f", "ALL", "con", "s", "-a"], replace_stdout=replace_uuids
        )

        self.call_nmcli_l(
            ["-f", "ACTIVE-PATH,DEVICE,UUID", "con", "s", "-act"],
            replace_stdout=replace_uuids,
        )

        self.call_nmcli_l(
            ["-f", "UUID,NAME", "con", "s", "--active"],
            replace_stdout=replace_uuids,
        )

        self.call_nmcli_l(
            ["-f", "ALL", "con", "s", "ethernet"], replace_stdout=replace_uuids
        )

        self.call_nmcli_l(
            ["-f", "GENERAL.STATE", "con", "s", "ethernet"],
            replace_stdout=replace_uuids,
        )

        self.call_nmcli_l(["con", "s", "ethernet"], replace_stdout=replace_uuids)

        self.call_nmcli_l(
            ["-f", "ALL", "dev", "status"], replace_stdout=replace_uuids
        )

        # test an invalid call ('s' abbreviates 'status', not 'show')
        self.call_nmcli_l(
            ["-f", "ALL", "dev", "s", "eth0"], replace_stdout=replace_uuids
        )

        self.call_nmcli_l(
            ["-f", "ALL", "dev", "show", "eth0"], replace_stdout=replace_uuids
        )

        self.call_nmcli_l(
            ["-f", "ALL", "-t", "dev", "show", "eth0"], replace_stdout=replace_uuids
        )

        self.async_wait()

        self.srv.setProperty(
            "/org/freedesktop/NetworkManager/ActiveConnection/1",
            "State",
            dbus.UInt32(NM.ActiveConnectionState.DEACTIVATING),
        )

        self.call_nmcli_l([], replace_stdout=replace_uuids)

        for i in [0, 1]:
            if i == 1:
                self.async_wait()
                self.srv.op_ConnectionSetVisible(False, con_id="ethernet")

            for mode in Util.iter_nmcli_output_modes():
                self.call_nmcli_l(
                    mode + ["-f", "ALL", "con"], replace_stdout=replace_uuids
                )

                self.call_nmcli_l(
                    mode + ["-f", "UUID,TYPE", "con"], replace_stdout=replace_uuids
                )

                self.call_nmcli_l(
                    mode + ["con", "s", "ethernet"], replace_stdout=replace_uuids
                )

                self.call_nmcli_l(
                    mode
                    + ["c", "s", "/org/freedesktop/NetworkManager/ActiveConnection/1"],
                    replace_stdout=replace_uuids,
                )

                self.call_nmcli_l(
                    mode + ["-f", "all", "dev", "show", "eth0"],
                    replace_stdout=replace_uuids,
                )
2018-07-20 16:32:07 +02:00
|
|
|
@nm_test
|
2018-05-25 18:00:51 +02:00
|
|
|
def test_004(self):
|
|
|
|
|
self.init_001()
|
|
|
|
|
|
        replace_uuids = []

        replace_uuids.append(
            self.ReplaceTextConUuid("con-xx1", "UUID-con-xx1-REPLACED-REPLACED-REPLA")
        )

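        # Create a wifi profile; its randomly generated UUID is normalized
        # in the recorded output by the replacement above.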
        self.call_nmcli(
            [
                "c",
                "add",
                "type",
                "wifi",
                "ifname",
                "*",
                "ssid",
                "foobar",
                "con-name",
                "con-xx1",
            ],
            replace_stdout=replace_uuids,
        )

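        # Exercise `connection modify` with valid properties as well as
        # apparently invalid ones ('ip.gateway', '802.abc'), recording
        # nmcli's error reporting. One call runs under the Polish locale
        # (lang="pl") to cover translated output.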
        self.call_nmcli(["connection", "mod", "con-xx1", "ip.gateway", ""])
        self.call_nmcli(
            ["connection", "mod", "con-xx1", "ipv4.gateway", "172.16.0.1"], lang="pl"
        )
        self.call_nmcli(["connection", "mod", "con-xx1", "ipv6.gateway", "::99"])
        self.call_nmcli(["connection", "mod", "con-xx1", "802.abc", ""])
        self.call_nmcli(["connection", "mod", "con-xx1", "802-11-wireless.band", "a"])
        self.call_nmcli(
            [
                "connection",
                "mod",
                "con-xx1",
                "ipv4.addresses",
                "192.168.77.5/24",
                "ipv4.routes",
                "2.3.4.5/32 192.168.77.1",
                "ipv6.addresses",
                "1:2:3:4::6/64",
                "ipv6.routes",
                "1:2:3:4:5:6::5/128",
            ]
        )
        self.call_nmcli_l(["con", "s", "con-xx1"], replace_stdout=replace_uuids)

        self.async_wait()

        replace_uuids.append(
            self.ReplaceTextConUuid("con-vpn-1", "UUID-con-vpn-1-REPLACED-REPLACED-REP")
        )

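        # Add an openvpn-based VPN profile. Note that the whitespace around
        # '=' in vpn.data is part of the test input.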
        self.call_nmcli(
            [
                "connection",
                "add",
                "type",
                "vpn",
                "con-name",
                "con-vpn-1",
                "ifname",
                "*",
                "vpn-type",
                "openvpn",
                "vpn.data",
                "key1 = val1, key2 = val2, key3=val3",
            ],
            replace_stdout=replace_uuids,
        )

        self.call_nmcli_l(["con", "s"], replace_stdout=replace_uuids)
        self.call_nmcli_l(["con", "s", "con-vpn-1"], replace_stdout=replace_uuids)

        self.call_nmcli(["con", "up", "con-xx1"])
        self.call_nmcli_l(["con", "s"], replace_stdout=replace_uuids)

        self.call_nmcli(["con", "up", "con-vpn-1"])
        self.call_nmcli_l(["con", "s"], replace_stdout=replace_uuids)
        self.call_nmcli_l(["con", "s", "con-vpn-1"], replace_stdout=replace_uuids)

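        # Wait for pending events, then mark the VPN connection as activated
        # on the stub service.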
        self.async_wait()

        self.srv.setProperty(
            "/org/freedesktop/NetworkManager/ActiveConnection/2",
            "VpnState",
            dbus.UInt32(NM.VpnConnectionState.ACTIVATED),
        )

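        # Collect the (replaced) UUIDs of all server-side connection profiles
        # so they can be passed to `connection show` further below.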
        uuids = Util.replace_text_sort_list(
            [c[1] for c in self.srv.findConnections()], replace_uuids
        )

        self.call_nmcli_l([], replace_stdout=replace_uuids)

        for mode in Util.iter_nmcli_output_modes():
            self.call_nmcli_l(
                mode + ["con", "s", "con-vpn-1"], replace_stdout=replace_uuids
            )
            self.call_nmcli_l(
                mode + ["con", "s", "con-vpn-1"], replace_stdout=replace_uuids
            )

            self.call_nmcli_l(
                mode + ["-f", "ALL", "con", "s", "con-vpn-1"],
                replace_stdout=replace_uuids,
            )

            # This only filters 'vpn' settings from the connection profile.
            # Unlike '-f GENERAL' below, it does not show the properties of
            # the activated VPN connection. This is an nmcli bug.
            self.call_nmcli_l(
                mode + ["-f", "VPN", "con", "s", "con-vpn-1"],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(
                mode + ["-f", "GENERAL", "con", "s", "con-vpn-1"],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(mode + ["dev", "s"], replace_stdout=replace_uuids)

            self.call_nmcli_l(
                mode + ["-f", "all", "dev", "status"], replace_stdout=replace_uuids
            )

            self.call_nmcli_l(mode + ["dev", "show"], replace_stdout=replace_uuids)

            self.call_nmcli_l(
                mode + ["-f", "all", "dev", "show"], replace_stdout=replace_uuids
            )

            self.call_nmcli_l(
                mode + ["dev", "show", "wlan0"], replace_stdout=replace_uuids
            )

            self.call_nmcli_l(
                mode + ["-f", "all", "dev", "show", "wlan0"],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "GENERAL,GENERAL.HWADDR,WIFI-PROPERTIES",
                    "dev",
                    "show",
                    "wlan0",
                ],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "GENERAL,GENERAL.HWADDR,WIFI-PROPERTIES",
                    "dev",
                    "show",
                    "wlan0",
                ],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(
                mode + ["-f", "DEVICE,TYPE,DBUS-PATH", "dev"],
                replace_stdout=replace_uuids,
            )

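            # List the mock access points with various field selections,
            # including filtering by BSSID.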
            self.call_nmcli_l(
                mode + ["-f", "ALL", "device", "wifi", "list"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "COMMON", "device", "wifi", "list"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "NAME,SSID,SSID-HEX,BSSID,MODE,CHAN,FREQ,RATE,SIGNAL,BARS,SECURITY,WPA-FLAGS,RSN-FLAGS,DEVICE,ACTIVE,IN-USE,DBUS-PATH",
                    "device",
                    "wifi",
                    "list",
                ],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + ["-f", "ALL", "device", "wifi", "list", "bssid", "C0:E2:BE:E8:EF:B6"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "COMMON",
                    "device",
                    "wifi",
                    "list",
                    "bssid",
                    "C0:E2:BE:E8:EF:B6",
                ],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "NAME,SSID,SSID-HEX,BSSID,MODE,CHAN,FREQ,RATE,SIGNAL,BARS,SECURITY,WPA-FLAGS,RSN-FLAGS,DEVICE,ACTIVE,IN-USE,DBUS-PATH",
                    "device",
                    "wifi",
                    "list",
                    "bssid",
                    "C0:E2:BE:E8:EF:B6",
                ],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(
                mode + ["-f", "ALL", "device", "show", "wlan0"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "COMMON", "device", "show", "wlan0"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "GENERAL,CAPABILITIES,WIFI-PROPERTIES,AP,WIRED-PROPERTIES,WIMAX-PROPERTIES,NSP,IP4,DHCP4,IP6,DHCP6,BOND,TEAM,BRIDGE,VLAN,BLUETOOTH,CONNECTIONS",
                    "device",
                    "show",
                    "wlan0",
                ],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(
                mode + ["dev", "lldp", "list", "ifname", "eth0"],
                replace_stdout=replace_uuids,
            )

            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "connection.id,connection.uuid,connection.type,connection.interface-name,802-3-ethernet.mac-address,vpn.user-name",
                    "connection",
                    "show",
                ]
                + uuids,
                replace_stdout=replace_uuids,
                replace_cmd=replace_uuids,
            )

    @nm_test_no_dbus
    def test_offline(self):
        # Make sure we're not using D-Bus
        no_dbus_env = {
            "DBUS_SYSTEM_BUS_ADDRESS": "very:invalid",
            "DBUS_SESSION_BUS_ADDRESS": "very:invalid",
        }

        # This check just makes sure the above works and the
        # "nmcli g" command indeed fails talking to D-Bus
        self.call_nmcli(
            ["g"],
            extra_env=no_dbus_env,
            replace_stderr=[
                Util.ReplaceTextRegex(
                    # depending on glib version, it prints `%s', '%s', or “%s”.
                    # depending on libc version, it converts unicode to ? or *.
                    r"Key/Value pair 0, [`*?']invalid[*?'], in address element [`*?']very:invalid[*?'] does not contain an equal sign",
                    "Key/Value pair 0, 'invalid', in address element 'very:invalid' does not contain an equal sign",
                )
            ],
        )

        replace_uuids = [
            Util.ReplaceTextRegex(
                r"\buuid=[-a-f0-9]+\b", "uuid=UUID-WAS-HERE-BUT-IS-NO-MORE-SADLY"
            )
        ]

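        # `--offline` must work without any D-Bus connection at all;
        # presumably the resulting profile is only printed (hence the UUID
        # normalization on stdout) rather than sent to a daemon.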
        self.call_nmcli(
            ["--offline", "c", "add", "type", "ethernet"],
            extra_env=no_dbus_env,
            replace_stdout=replace_uuids,
        )

        self.call_nmcli(
            ["--offline", "c", "show"],
            extra_env=no_dbus_env,
        )

        self.call_nmcli(
            ["--offline", "g"],
            extra_env=no_dbus_env,
        )

        self.call_nmcli(
            ["--offline"],
            extra_env=no_dbus_env,
        )

        self.call_nmcli(
            [
                "--offline",
                "c",
                "add",
                "type",
                "wifi",
                "ssid",
                "lala",
                "802-1x.eap",
                "pwd",
                "802-1x.identity",
                "foo",
                "802-1x.password",
                "bar",
            ],
            extra_env=no_dbus_env,
            replace_stdout=replace_uuids,
        )

        self.call_nmcli(
            [
                "--offline",
                "c",
                "add",
                "type",
                "wifi",
                "ssid",
                "lala",
                "802-1x.eap",
                "pwd",
                "802-1x.identity",
                "foo",
                "802-1x.password",
                "bar",
                "802-1x.password-flags",
                "agent-owned",
            ],
            extra_env=no_dbus_env,
            replace_stdout=replace_uuids,
        )

        self.call_nmcli(
            ["--complete-args", "--offline", "conn", "modify", "ipv6.ad"],
            extra_env=no_dbus_env,
        )

    @skip_without_pexpect
    @nm_test
    def test_ask_mode(self):
        nmc = self.call_nmcli_pexpect(["--ask", "c", "add"])

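        # Walk through the interactive dialogue, declining all the optional
        # settings, and expect the connection to be created successfully.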
        nmc.pexp.expect("Connection type:")
        nmc.pexp.sendline("ethernet")
        nmc.pexp.expect("Interface name:")
        nmc.pexp.sendline("eth0")
        nmc.pexp.expect("There are 3 optional settings for Wired Ethernet.")
        nmc.pexp.expect(r"Do you want to provide them\? \(yes/no\) \[yes]")
        nmc.pexp.sendline("no")
        nmc.pexp.expect("There are 2 optional settings for IPv4 protocol.")
        nmc.pexp.expect(r"Do you want to provide them\? \(yes/no\) \[yes]")
        nmc.pexp.sendline("no")
        nmc.pexp.expect("There are 2 optional settings for IPv6 protocol.")
        nmc.pexp.expect(r"Do you want to provide them\? \(yes/no\) \[yes]")
        nmc.pexp.sendline("no")
        nmc.pexp.expect("There are 4 optional settings for Proxy.")
        nmc.pexp.expect(r"Do you want to provide them\? \(yes/no\) \[yes]")
        nmc.pexp.sendline("no")
        nmc.pexp.expect(r"Connection 'ethernet' \(.*\) successfully added.")
        nmc.pexp.expect(pexpect.EOF)

        Util.valgrind_check_log(nmc.valgrind_log, "test_ask_mode")

    @skip_without_pexpect
    @nm_test
    def test_monitor(self):
        def start_mon(self):
            nmc = self.call_nmcli_pexpect(["monitor"])
            nmc.pexp.expect("NetworkManager is running")
            return nmc

        def end_mon(self, nmc):
            nmc.pexp.kill(signal.SIGINT)
            nmc.pexp.expect(pexpect.EOF)
            Util.valgrind_check_log(nmc.valgrind_log, "test_monitor")

        nmc = start_mon(self)

        self.srv.op_AddObj("WiredDevice", iface="eth0")
        nmc.pexp.expect("eth0: device created\r\n")

        self.srv.addConnection(
            {"connection": {"type": "802-3-ethernet", "id": "con-1"}}
        )
        nmc.pexp.expect("con-1: connection profile created\r\n")

        end_mon(self, nmc)

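        # A second monitor session: shutting down the stub service must
        # report removal of the profile and the device (in either order),
        # followed by "NetworkManager is stopped".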
        nmc = start_mon(self)
        self.srv_shutdown()
        Util.pexpect_expect_all(
            nmc.pexp,
            "con-1: connection profile removed",
            "eth0: device removed",
        )
        nmc.pexp.expect("NetworkManager is stopped")
        end_mon(self, nmc)


###############################################################################


class TestNmCloudSetup(TestNmClient):
    def cloud_setup_test(func):
        """
        Runs the mock NetworkManager along with a mock cloud metadata service.
        """

        def f(self):
            if pexpect is None:
                raise unittest.SkipTest("pexpect not available")

            if tuple(sys.version_info[0:2]) < (3, 2):
                # subprocess.Popen()'s "pass_fds" argument requires at least Python 3.2.
                raise unittest.SkipTest("This test requires at least Python 3.2")

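            # Pre-bind a listening TCP socket for the mock metadata service
            # and hand it over as fd 3 following the systemd socket-activation
            # convention (LISTEN_FDS=1).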
            s = socket.socket()
            s.set_inheritable(True)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            s.bind(("localhost", 0))

            # The same value as Python's TCPServer uses.
            # Chosen by summoning the spirit of TCP under the influence of
            # hallucinogenic substances.
            s.listen(5)

            def pass_socket():
                os.dup2(s.fileno(), 3, inheritable=True)

            service_path = PathConfiguration.test_cloud_meta_mock_path()
            env = os.environ.copy()
            env["LISTEN_FDS"] = "1"
            p = subprocess.Popen(
                [sys.executable, service_path],
                stdin=subprocess.PIPE,
                env=env,
                pass_fds=(s.fileno(),),
                preexec_fn=pass_socket,
            )

            self.md_url = "http://%s:%d" % s.getsockname()
            s.close()

            self.srv_start()
            func(self)
            self._nm_test_post()

            p.terminate()
            p.wait()

        return f

    @cloud_setup_test
    def test_ec2(self):
        # Add a device with an active connection that has IPv4 configured
        self.srv.op_AddObj("WiredDevice", iface="eth0")
        self.srv.addAndActivateConnection(
            {
                "connection": {"type": "802-3-ethernet", "id": "con-eth0"},
                "ipv4": {"method": "auto"},
            },
            "/org/freedesktop/NetworkManager/Devices/1",
            delay=0,
        )

        # The second connection has no IPv4
        self.srv.op_AddObj("WiredDevice", iface="eth1")
        self.srv.addAndActivateConnection(
            {"connection": {"type": "802-3-ethernet", "id": "con-eth1"}},
            "/org/freedesktop/NetworkManager/Devices/2",
            "",
            delay=0,
        )

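        # NM_CLOUD_SETUP_EC2_HOST points the EC2 provider at the local mock
        # metadata service started by the decorator.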
        # Run nm-cloud-setup for the first time
        nmc = self.call_pexpect(
            ENV_NM_TEST_CLIENT_CLOUD_SETUP_PATH,
            [],
            {
                "NM_CLOUD_SETUP_EC2_HOST": self.md_url,
                "NM_CLOUD_SETUP_LOG": "trace",
                "NM_CLOUD_SETUP_EC2": "yes",
            },
        )

        nmc.pexp.expect("provider ec2 detected")
        nmc.pexp.expect("found interfaces: 9E:C0:3E:92:24:2D, 53:E9:7E:52:8D:A8")
        nmc.pexp.expect("get-config: starting")
        nmc.pexp.expect("get-config: success")
        nmc.pexp.expect("meta data received")
        # One of the devices has no IPv4 configuration to be modified
        nmc.pexp.expect("device has no suitable applied connection. Skip")
        # The other one was lacking an address, so it was set up.
        nmc.pexp.expect("some changes were applied for provider ec2")
        nmc.pexp.expect(pexpect.EOF)

        # Run nm-cloud-setup for the second time
        nmc = self.call_pexpect(
            ENV_NM_TEST_CLIENT_CLOUD_SETUP_PATH,
            [],
            {
                "NM_CLOUD_SETUP_EC2_HOST": self.md_url,
                "NM_CLOUD_SETUP_LOG": "trace",
                "NM_CLOUD_SETUP_EC2": "yes",
            },
        )

        nmc.pexp.expect("provider ec2 detected")
        nmc.pexp.expect("found interfaces: 9E:C0:3E:92:24:2D, 53:E9:7E:52:8D:A8")
        nmc.pexp.expect("get-config: starting")
        nmc.pexp.expect("get-config: success")
        nmc.pexp.expect("meta data received")
        # No changes this time
        nmc.pexp.expect('device needs no update to applied connection "con-eth0"')
        nmc.pexp.expect("no changes were applied for provider ec2")
        nmc.pexp.expect(pexpect.EOF)

        Util.valgrind_check_log(nmc.valgrind_log, "test_ec2")


###############################################################################


def main():
    global dbus_session_inited

    if len(sys.argv) >= 2 and sys.argv[1] == "--started-with-dbus-session":
        dbus_session_inited = True
        del sys.argv[1]

    if not dbus_session_inited:
        # We don't yet have our own D-Bus session. Re-exec ourselves inside
        # a new dbus-run-session.
        try:
            try:
                os.execlp(
                    "dbus-run-session",
                    "dbus-run-session",
                    "--",
                    sys.executable,
                    __file__,
                    "--started-with-dbus-session",
                    *sys.argv[1:]
                )
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                # dbus-run-session is not in PATH? Fall through
                # to skip tests gracefully.
            else:
                raise Exception("unknown error during exec")
        except Exception as e:
            assert False, "Failure to re-exec dbus-run-session: %s" % (str(e))

    if not dbus_session_inited:
        # we still don't have a D-Bus session. Probably dbus-run-session is not available.
        # retry with dbus-launch
        if os.system("type dbus-launch 1>/dev/null") == 0:
            try:
                os.execlp(
                    "bash",
                    "bash",
                    "-e",
                    "-c",
                    "eval `dbus-launch --sh-syntax`;\n"
                    + 'trap "kill $DBUS_SESSION_BUS_PID" EXIT;\n'
                    + "\n"
                    + Util.shlex_join(
                        [
                            sys.executable,
                            __file__,
                            "--started-with-dbus-session",
                        ]
                        + sys.argv[1:]
                    )
                    + " \n"
                    + "",
                )
            except Exception as e:
                m = str(e)
            else:
                m = "unknown error"
            assert False, "Failure to re-exec to start script with dbus-launch: %s" % (
                m
            )

    r = unittest.main(exit=False)

    sys.exit(not r.result.wasSuccessful())


if __name__ == "__main__":
    main()
|