# NetworkManager/src/tests/client/test-client.py
# (NOTE: web-scrape page chrome removed -- the original blame-view header
# lines, "1965 lines / 65 KiB / Python / Raw Normal View History", were not
# part of the source file.)
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
###############################################################################
#
# This test starts NetworkManager stub service in a user D-Bus session,
# and runs nmcli against it. The output is recorded and compared to a pre-generated
# expected output (src/tests/client/test-client.check-on-disk/*.expected) which
# is also committed to git.
#
###############################################################################
#
# HOWTO: Regenerate output
#
# When adjusting the tests, or when making changes to nmcli that intentionally
# change the output, the expected output must be regenerated.
#
# For that, you'd setup your system correctly (see SETUP below) and then simply:
#
# $ NM_TEST_REGENERATE=1 make check-local-tests-client
# # Or `NM_TEST_REGENERATE=1 make check -j 10`
# $ git diff ... ; git add ...
# # The previous step regenerated the expected output. Review the changes
# # and consider whether they are correct. Then commit the changes to git.
#
# With meson, you can do
# $ meson -Ddocs=true --prefix=/tmp/nm1 build
# $ ninja -C build
# $ ninja -C build install
# $ NM_TEST_REGENERATE=1 ninja -C build test
#
# Beware that you need to install the sources, and beware to choose a prefix that doesn't
# mess up your system (see SETUP below).
#
# SETUP: For regenerating the output, the translations must work. First
# test whether the following works:
#
# 1) LANG=pl_PL.UTF-8 /usr/bin/nmcli --version
# # Ensure that Polish output works for the system-installed nmcli.
# # If not, you should ensure that `locale -a` reports the Polish
# # locale. If that is not the case, how to enable the locale depends on
# # your distro.
# #
# # On Debian, you might do:
# # sed -i 's/^# \(pl_PL.UTF-8 .*\)$/\1/p' /etc/locale.gen
# # locale-gen pl_PL.UTF-8
# # On Fedora, you might install `glibc-langpack-pl` package.
#
# 2) LANG=pl_PL.UTF-8 ./src/nmcli/nmcli --version
# # Ensure that the built nmcli has Polish locale working. If not,
# # you probably need to first `make install` the application at the
# # correct prefix. Take care to configure the build with the desired
# # prefix, like `./configure --prefix=/opt/tmp`. Usually, you want to avoid
# # using /usr as prefix, because that might overwrite files from your
# # package management system.
#
###############################################################################
#
# Environment variables to configure the test:
# (optional) The build dir. Optional, mainly used to find the nmcli binary (in case
# ENV_NM_TEST_CLIENT_NMCLI_PATH is not set).
ENV_NM_TEST_CLIENT_BUILDDIR = "NM_TEST_CLIENT_BUILDDIR"
# (optional) Path to nmcli. By default, it looks for nmcli in build dir.
# In particular, you can test also a nmcli binary installed somewhere else.
ENV_NM_TEST_CLIENT_NMCLI_PATH = "NM_TEST_CLIENT_NMCLI_PATH"
# (optional) The test also compares translated output (l10n). This requires
# that you first install the translation in the right place. So, by default,
# if a test for a translation fails, it will mark the test as skipped, and not
# fail the tests -- under the assumption that the test cannot succeed currently.
# By setting NM_TEST_CLIENT_CHECK_L10N=1, you can force a failure of the test.
ENV_NM_TEST_CLIENT_CHECK_L10N = "NM_TEST_CLIENT_CHECK_L10N"
# Regenerate the .expected files. Instead of asserting, rewrite the files
# on disk with the expected output.
ENV_NM_TEST_REGENERATE = "NM_TEST_REGENERATE"
# Whether the file location should include the line number. That is useful
# only for debugging, to correlate the expected output with the test.
# Obviously, since the expected output is committed to git without line numbers,
# you'd have to first NM_TEST_REGENERATE the test expected data, with line
# numbers enabled.
ENV_NM_TEST_WITH_LINENO = "NM_TEST_WITH_LINENO"
# (optional) Sanitizer options passed to the spawned processes; when unset,
# defaults are chosen in Configuration.get().
ENV_NM_TEST_ASAN_OPTIONS = "NM_TEST_ASAN_OPTIONS"
ENV_NM_TEST_LSAN_OPTIONS = "NM_TEST_LSAN_OPTIONS"
ENV_NM_TEST_UBSAN_OPTIONS = "NM_TEST_UBSAN_OPTIONS"
#
###############################################################################
import sys
import os
import errno
import unittest
import socket
import itertools
import subprocess
import shlex
import re
import fcntl
import dbus
import time
# (git-blame scrape artifact removed: interleaved commit message for
# "clients/tests: don't wait for first job before scheduling parallel jobs",
# 2019-10-12. It was not part of the source file and broke the Python syntax.)
import random
import dbus.service
import dbus.mainloop.glib
import io
import gi
try:
from gi.repository import GLib
except ImportError:
GLib = None
try:
gi.require_version("NM", "1.0")
except ValueError:
NM = None
else:
try:
from gi.repository import NM
except ImportError:
NM = None
try:
import pexpect
except ImportError:
pexpect = None
###############################################################################
class PathConfiguration:
    """Static helpers that locate this test script and related files
    inside the NetworkManager source tree."""

    @staticmethod
    def srcdir():
        """Return the absolute directory that contains this test script.

        Other source-tree locations are derived from this directory."""
        return os.path.dirname(os.path.abspath(__file__))

    @staticmethod
    def top_srcdir():
        """Return the absolute path of the repository's top-level directory
        (three levels above the test script)."""
        return os.path.abspath(
            os.path.join(PathConfiguration.srcdir(), "..", "..", "..")
        )

    @staticmethod
    def test_networkmanager_service_path():
        """Return the path to the NetworkManager stub D-Bus service script,
        asserting that it actually exists on disk."""
        service = os.path.abspath(
            os.path.join(
                PathConfiguration.top_srcdir(),
                "tools",
                "test-networkmanager-service.py",
            )
        )
        assert os.path.exists(service), 'Cannot find test server at "%s"' % (service)
        return service

    @staticmethod
    def canonical_script_filename():
        """Return this script's canonical path relative to the top of the
        source tree, asserting that it matches the actual file location."""
        relpath = "src/tests/client/test-client.py"
        assert (PathConfiguration.top_srcdir() + "/" + relpath) == os.path.abspath(
            __file__
        )
        return relpath
###############################################################################
# Whether a private D-Bus session bus has been started for the tests.
# (Presumably flipped by test-setup code later in the file -- not visible
# in this chunk; confirm against the full file.)
dbus_session_inited = False

# Sentinel meaning "argument was not explicitly provided by the caller".
_DEFAULT_ARG = object()
# Sentinel presumably marking output that is not stable between runs --
# confirm against its usage later in the file.
_UNSTABLE_OUTPUT = object()
###############################################################################
class Util:
    """Assorted static helper functions shared by the nmcli client tests.

    This class is purely a namespace; it is never instantiated.
    """

    # Signal-number to name mapping used to pretty-print how a child process
    # terminated.
    # NOTE(review): entries 16..31 do not match the usual Linux/x86 numbering
    # (e.g. on Linux 17 is SIGCHLD, not SIGSTOP) -- the table looks BSD-like.
    # Kept byte-identical because the recorded .expected output may depend on
    # it; confirm before using the names for anything but log messages.
    _signal_no_lookup = {
        1: "SIGHUP",
        2: "SIGINT",
        3: "SIGQUIT",
        4: "SIGILL",
        5: "SIGTRAP",
        6: "SIGABRT",
        8: "SIGFPE",
        9: "SIGKILL",
        11: "SIGSEGV",
        12: "SIGSYS",
        13: "SIGPIPE",
        14: "SIGALRM",
        15: "SIGTERM",
        16: "SIGURG",
        17: "SIGSTOP",
        18: "SIGTSTP",
        19: "SIGCONT",
        20: "SIGCHLD",
        21: "SIGTTIN",
        22: "SIGTTOU",
        23: "SIGPOLL",
        24: "SIGXCPU",
        25: "SIGXFSZ",
        26: "SIGVTALRM",
        27: "SIGPROF",
        30: "SIGUSR1",
        31: "SIGUSR2",
    }

    @classmethod
    def signal_no_to_str(cls, signal):
        """Return the symbolic name for signal number @signal, or
        "<unknown N>" if the number is not in the lookup table."""
        s = cls._signal_no_lookup.get(signal, None)
        if s is None:
            return "<unknown %d>" % (signal)
        return s

    @staticmethod
    def python_has_version(major, minor=0):
        """Whether the running interpreter is at least version @major.@minor."""
        return sys.version_info[0] > major or (
            sys.version_info[0] == major and sys.version_info[1] >= minor
        )

    @staticmethod
    def is_string(s):
        """Whether @s is a text string, on both Python 2 (basestring) and 3 (str)."""
        if Util.python_has_version(3):
            t = str
        else:
            t = basestring
        return isinstance(s, t)

    @staticmethod
    def as_bytes(s):
        """Coerce @s to bytes; text strings are UTF-8 encoded, bytes pass through."""
        if Util.is_string(s):
            return s.encode("utf-8")
        assert isinstance(s, bytes)
        return s

    @staticmethod
    def memoize_nullary(nullary_func):
        """Wrap the argument-less @nullary_func so it is invoked at most once;
        all subsequent calls return the cached result."""
        result = []

        def closure():
            # The cache list is non-empty once the function ran, regardless
            # of whether the cached value itself is falsy.
            if not result:
                result.append(nullary_func())
            return result[0]

        return closure

    # Matcher for characters that require shell quoting; used only by the
    # Python < 3.3 fallback in quote() (mirrors shlex.quote()'s internals).
    _find_unsafe = re.compile(
        r"[^\w@%+=:,./-]", re.ASCII if sys.version_info[0] >= 3 else 0
    ).search

    @staticmethod
    def quote(s):
        """Shell-quote @s, like shlex.quote(), with a fallback for old Python."""
        if Util.python_has_version(3, 3):
            return shlex.quote(s)
        if not s:
            return "''"
        if Util._find_unsafe(s) is None:
            return s
        return "'" + s.replace("'", "'\"'\"'") + "'"

    @staticmethod
    def popen_wait(p, timeout=0):
        """Wait up to @timeout seconds for process @p to exit, without reading
        its pipes. Returns the exit code, or None if still running."""
        (res, _, _) = Util.popen_wait_read(p, timeout=timeout, read_std_pipes=False)
        return res

    @staticmethod
    def popen_wait_read(p, timeout=0, read_std_pipes=True):
        """Poll process @p until it exits or @timeout seconds elapse,
        optionally draining its stdout/stderr pipes along the way.

        Returns a (returncode_or_None, stdout_bytes, stderr_bytes) tuple.
        Timing uses NM.utils_get_timestamp_msec(), so the libnm GI bindings
        must be available. With timeout == 0 this performs a single poll.
        """
        start = NM.utils_get_timestamp_msec()
        delay = 0.0005
        b_stdout = b""
        b_stderr = b""
        res = None
        while True:
            if read_std_pipes:
                b_stdout += Util.buffer_read(p.stdout)
                b_stderr += Util.buffer_read(p.stderr)
            if p.poll() is not None:
                res = p.returncode
                break
            if timeout == 0:
                break
            assert timeout > 0
            remaining = timeout - ((NM.utils_get_timestamp_msec() - start) / 1000.0)
            if remaining <= 0:
                break
            # Exponential back-off, capped at 50ms and at the remaining time.
            delay = min(delay * 2, remaining, 0.05)
            time.sleep(delay)
        return (res, b_stdout, b_stderr)

    @staticmethod
    def buffer_read(buf):
        """Read all currently-available bytes from (non-blocking) file @buf.

        A would-block condition is treated as end of the available data."""
        b = b""
        while True:
            try:
                b1 = buf.read()
            except io.BlockingIOError:
                b1 = b""
            except IOError:
                # On Python 3, IOError is OSError and already covers the
                # branch above; kept for Python 2 compatibility.
                b1 = b""
            if not b1:
                return b
            b += b1

    @staticmethod
    def buffer_set_nonblock(buf):
        """Put the file object @buf into non-blocking mode via fcntl."""
        fd = buf.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

    @staticmethod
    def random_job(jobs):
        """Generator yielding one randomly-chosen job per iteration, with
        earlier list entries weighted more heavily."""
        jobs = list(jobs)
        l = len(jobs)
        t = l * (l + 1) / 2
        while True:
            # we return a random jobs from the list, but the indexes at the front of
            # the list are more likely. The idea is, that those jobs were started first,
            # and are expected to complete first. As we poll, we want to check more frequently
            # on the elements at the beginning of the list...
            #
            # Let's assign probabilities with an arithmetic series.
            # That is, if there are 16 jobs, then the first gets weighted
            # with 16, the second with 15, then 14, and so on, until the
            # last has weight 1. That means, the first element is 16 times
            # more probable than the last.
            # Element at idx (starting with 0) is picked with probability
            #   1 / (l*(l+1)/2) * (l - idx)
            r = random.random() * t
            idx = 0
            rx = 0
            while True:
                rx += l - idx
                if rx >= r or idx == l - 1:
                    yield jobs[idx]
                    break
                idx += 1

    @staticmethod
    def iter_single(itr, min_num=1, max_num=1):
        """Return the first element of @itr, asserting that the element count
        lies within [min_num, max_num].

        NOTE: the loop stops counting after the second element, so values of
        max_num greater than 1 are not fully honored; callers use the
        defaults (exactly one element).
        """
        itr = list(itr)
        n = 0
        v = None
        for c in itr:
            n += 1
            if n > 1:
                break
            v = c
        if n < min_num:
            raise AssertionError(
                "Expected at least %s elements, but %s found" % (min_num, n)
            )
        if n > max_num:
            raise AssertionError(
                "Expected at most %s elements, but %s found" % (max_num, n)
            )
        return v

    @staticmethod
    def file_read(filename):
        """Return the binary content of @filename, or None on any error.

        Deliberately best-effort: missing/unreadable files yield None."""
        try:
            with open(filename, "rb") as f:
                return f.read()
        except:
            return None

    @staticmethod
    def _replace_text_match_join(split_arr, replacement):
        """Interleave @replacement (wrapped in a 1-tuple, marking it atomic)
        between the fragments of @split_arr."""
        yield split_arr[0]
        for t in split_arr[1:]:
            yield (replacement,)
            yield t

    @staticmethod
    def ReplaceTextSimple(search, replacement):
        # This gives a function that can be used by Util.replace_text().
        # The function replaces an input bytes string @t. It must either return
        # a bytes string, a list containing bytes strings and/or 1-tuples (the
        # latter containing one bytes string).
        # The 1-tuple acts as a placeholder for atomic text, that cannot be replaced
        # a second time.
        #
        # Search for replace_text_fcn in Util.replace_text() where this is called.
        replacement = Util.as_bytes(replacement)
        # @search may itself be a callable returning the text to search for
        # (or None to disable the replacement).
        if callable(search):
            search_fcn = search
        else:
            search_fcn = lambda: search

        def replace_fcn(t):
            assert isinstance(t, bytes)
            search_txt = search_fcn()
            if search_txt is None:
                return t
            search_txt = Util.as_bytes(search_txt)
            return Util._replace_text_match_join(t.split(search_txt), replacement)

        return replace_fcn

    @staticmethod
    def ReplaceTextRegex(pattern, replacement):
        # See ReplaceTextSimple.
        pattern = Util.as_bytes(pattern)
        replacement = Util.as_bytes(replacement)
        p = re.compile(pattern)
        return lambda t: Util._replace_text_match_join(p.split(t), replacement)

    @staticmethod
    def replace_text(text, replace_arr):
        """Apply each replace function from @replace_arr to @text.

        @text may be str or bytes; the return type matches the input.
        Already-replaced spans are wrapped in 1-tuples so they are not
        replaced a second time by later functions.
        """
        if not replace_arr:
            return text
        needs_encode = Util.python_has_version(3) and Util.is_string(text)
        if needs_encode:
            text = text.encode("utf-8")
        text = [text]
        for replace_text_fcn in replace_arr:
            text2 = []
            for t in text:
                # tuples are markers for atomic strings. They won't be replaced a second
                # time.
                if not isinstance(t, tuple):
                    t = replace_text_fcn(t)
                if isinstance(t, bytes) or isinstance(t, tuple):
                    text2.append(t)
                else:
                    text2.extend(t)
            text = text2
        bb = b"".join([(t[0] if isinstance(t, tuple) else t) for t in text])
        if needs_encode:
            bb = bb.decode("utf-8")
        return bb

    @staticmethod
    def replace_text_sort_list(lst, replace_arr):
        """Sort @lst by each element's replaced form, returning the original
        (un-replaced) elements in that order."""
        lst = [(Util.replace_text(elem, replace_arr), elem) for elem in lst]
        lst = sorted(lst)
        lst = [tup[1] for tup in lst]
        return list(lst)

    @staticmethod
    def debug_dbus_interface():
        # this is for printf debugging, not used in actual code.
        os.system(
            "busctl --user --verbose call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects | cat"
        )

    @staticmethod
    def iter_nmcli_output_modes():
        """Yield the nmcli argument combinations (mode x format x color)
        under which test commands are invoked; 3 * 3 * 2 = 18 combinations."""
        for mode in [[], ["--mode", "tabular"], ["--mode", "multiline"]]:
            for fmt in [[], ["--pretty"], ["--terse"]]:
                for color in [[], ["--color", "yes"]]:
                    yield mode + fmt + color
###############################################################################
class Configuration:
    """Lazily-computed, cached test configuration read from the ENV_NM_TEST_*
    environment variables."""

    def __init__(self):
        # Cache of already-computed values, keyed by the ENV_* name.
        self._values = {}

    def get(self, name):
        """Return the configuration value for @name, computing and caching it
        on first access. Raises for unknown names."""
        if name in self._values:
            return self._values[name]
        value = self._compute(name)
        self._values[name] = value
        return value

    def _compute(self, name):
        # Compute (without caching) the value for @name.
        if name == ENV_NM_TEST_CLIENT_BUILDDIR:
            builddir = os.environ.get(
                ENV_NM_TEST_CLIENT_BUILDDIR, PathConfiguration.top_srcdir()
            )
            if not os.path.isdir(builddir):
                raise Exception("Missing builddir. Set NM_TEST_CLIENT_BUILDDIR?")
            return builddir

        if name == ENV_NM_TEST_CLIENT_NMCLI_PATH:
            nmcli_path = os.environ.get(ENV_NM_TEST_CLIENT_NMCLI_PATH, None)
            if nmcli_path is None:
                try:
                    nmcli_path = os.path.abspath(
                        self.get(ENV_NM_TEST_CLIENT_BUILDDIR) + "/src/nmcli/nmcli"
                    )
                except:
                    pass
            if not os.path.exists(nmcli_path):
                raise Exception("Missing nmcli binary. Set NM_TEST_CLIENT_NMCLI_PATH?")
            return nmcli_path

        if name == ENV_NM_TEST_CLIENT_CHECK_L10N:
            # if we test locales other than 'C', the output of nmcli depends on whether
            # nmcli can load the translations. Unfortunately, I cannot find a way to
            # make gettext use the po/*.gmo files from the build-dir.
            #
            # hence, such tests only work, if you also issue `make-install`
            #
            # Only by setting NM_TEST_CLIENT_CHECK_L10N=1, these tests are included
            # as well.
            return os.environ.get(ENV_NM_TEST_CLIENT_CHECK_L10N, "0") == "1"

        if name == ENV_NM_TEST_REGENERATE:
            # in the "regenerate" mode, the tests will rewrite the files on disk against
            # which we assert. That is useful, if there are intentional changes and
            # we want to regenerate the expected output.
            return os.environ.get(ENV_NM_TEST_REGENERATE, "0") == "1"

        if name == ENV_NM_TEST_WITH_LINENO:
            return os.environ.get(ENV_NM_TEST_WITH_LINENO, "0") == "1"

        if name in (
            ENV_NM_TEST_ASAN_OPTIONS,
            ENV_NM_TEST_LSAN_OPTIONS,
            ENV_NM_TEST_UBSAN_OPTIONS,
        ):
            value = os.environ.get(name, None)
            if value is not None:
                return value
            # Defaults when the variable is not set in the environment.
            if name == ENV_NM_TEST_ASAN_OPTIONS:
                # v += ' fast_unwind_on_malloc=false'
                return "detect_leaks=1"
            if name == ENV_NM_TEST_LSAN_OPTIONS:
                return ""
            if name == ENV_NM_TEST_UBSAN_OPTIONS:
                return "print_stacktrace=1:halt_on_error=1"
            assert False

        raise Exception()


conf = Configuration()
###############################################################################
class NMStubServer:
@staticmethod
def _conn_get_main_object(conn):
try:
return conn.get_object(
"org.freedesktop.NetworkManager", "/org/freedesktop/NetworkManager"
)
except:
return None
clients/tests: seed generated numbers for test-networkmanager-service.py At several places, "test-networkmanager-service.py" uses generated numbers with a defined seed. For example, generated connection's UUID is generated in a predictable, but randomized way (if you forgive the inprecise use of the word "random" in context of using a deterministic seed). Aside the connection's UUID, this becomes more interesting in the next commit where the stub server generates a list of IP and DHCP settings in a predictable randomized way. For "clients/tests" we spawn the test service multiple times, but also create similar environments by calling init_001(). This is done for convenience, where out of lazyness all the tests share one setup. But it's still a good idea that these tests generate slightly different setups, wherever applicable. this increases the possible setups which get tested. For example, the number of static IPv4 addresses (the following commit) is interested to explicitly test for zero or a non-zero number of addresses. If all tests happen to use the same seed, the tests are expected to also generate the same number of addresses, and we miss an opportunity to hit interesting test cases. There is still no guarantee that all interesting cases are hit, the chances are just better. The approach of generating the setup randomly, does not preclude that the stub-server allows to explicitly configure the setup. However, due to the sheer number of combinations that might be interesting to test, it's much simpler to rely on some randomization and have the justifid hope we catch interesting cases. Also in terms of runtime of the test, the cli unit tests should complete within few seconds. Testing every combination would result in huge tests and long runtimes. Also, the patch refactors generating random numbers in "test-networkmanager-service.py". For example, it introduces Util.RandomSeed(), which can be used to generate a sequence of different random numbers. 
It works by having an internal state and a counter which is combined to chain the seed and generate different numbers on each call.
2018-06-07 17:42:31 +02:00
def __init__(self, seed):
service_path = PathConfiguration.test_networkmanager_service_path()
self._conn = dbus.SessionBus()
clients/tests: seed generated numbers for test-networkmanager-service.py At several places, "test-networkmanager-service.py" uses generated numbers with a defined seed. For example, generated connection's UUID is generated in a predictable, but randomized way (if you forgive the inprecise use of the word "random" in context of using a deterministic seed). Aside the connection's UUID, this becomes more interesting in the next commit where the stub server generates a list of IP and DHCP settings in a predictable randomized way. For "clients/tests" we spawn the test service multiple times, but also create similar environments by calling init_001(). This is done for convenience, where out of lazyness all the tests share one setup. But it's still a good idea that these tests generate slightly different setups, wherever applicable. this increases the possible setups which get tested. For example, the number of static IPv4 addresses (the following commit) is interested to explicitly test for zero or a non-zero number of addresses. If all tests happen to use the same seed, the tests are expected to also generate the same number of addresses, and we miss an opportunity to hit interesting test cases. There is still no guarantee that all interesting cases are hit, the chances are just better. The approach of generating the setup randomly, does not preclude that the stub-server allows to explicitly configure the setup. However, due to the sheer number of combinations that might be interesting to test, it's much simpler to rely on some randomization and have the justifid hope we catch interesting cases. Also in terms of runtime of the test, the cli unit tests should complete within few seconds. Testing every combination would result in huge tests and long runtimes. Also, the patch refactors generating random numbers in "test-networkmanager-service.py". For example, it introduces Util.RandomSeed(), which can be used to generate a sequence of different random numbers. 
It works by having an internal state and a counter which is combined to chain the seed and generate different numbers on each call.
2018-06-07 17:42:31 +02:00
env = os.environ.copy()
env["NM_TEST_NETWORKMANAGER_SERVICE_SEED"] = seed
p = subprocess.Popen(
[sys.executable, service_path], stdin=subprocess.PIPE, env=env
)
start = NM.utils_get_timestamp_msec()
while True:
if p.poll() is not None:
p.stdin.close()
if p.returncode == 77:
raise unittest.SkipTest(
"the stub service %s exited with status 77" % (service_path)
)
raise Exception(
"the stub service %s exited unexpectedly" % (service_path)
)
nmobj = self._conn_get_main_object(self._conn)
if nmobj is not None:
break
if (NM.utils_get_timestamp_msec() - start) >= 4000:
p.stdin.close()
p.kill()
clients/tests: don't wait for first job before scheduling parallel jobs Previously, the test would kick off 15 processes in parallel, but the first job in the queue would block more processes from being started. That is, async_start() would only start 15 processes, but since none of them were reaped before async_wait() was called, no more than 15 jobs were running during the start phase. That is not a real issue, because the start phase is non-blocking and queues all the jobs quickly. It's not really expected that during that time many processes already completed. Anyway, this was a bit ugly. The bigger problem is that async_wait() would always block for the first job to complete, before starting more processes. That means, if the first job in the queue takes unusually long, then this blocks other processes from getting reaped and new processes from being started. Instead, don't block only one one jobs, but poll them in turn for a short amount of time. Whichever process exits first will be completed and more jobs will be started. In fact, in the current setup it's hard to notice any difference, because all nmcli invocations take about the same time and are relatively fast. That this approach parallelizes better can be seen when the runtime of jobs varies stronger (and some invocations take a notably longer time). As we later want to run nmcli under valgrind, this probably will make a difference. An alternative would be not to poll()/wait() for child processes, but somehow get notified. For example, we could use a GMainContext and watch child processes. But that's probably more complicated to do, so let's keep the naive approach with polling.
2019-10-12 11:02:21 +02:00
Util.popen_wait(p, 1)
raise Exception(
"after starting stub service the D-Bus name was not claimed in time"
)
self._nmobj = nmobj
self._nmiface = dbus.Interface(
nmobj, "org.freedesktop.NetworkManager.LibnmGlibTest"
)
self._p = p
def shutdown(self):
clients/tests: don't wait for first job before scheduling parallel jobs Previously, the test would kick off 15 processes in parallel, but the first job in the queue would block more processes from being started. That is, async_start() would only start 15 processes, but since none of them were reaped before async_wait() was called, no more than 15 jobs were running during the start phase. That is not a real issue, because the start phase is non-blocking and queues all the jobs quickly. It's not really expected that during that time many processes already completed. Anyway, this was a bit ugly. The bigger problem is that async_wait() would always block for the first job to complete, before starting more processes. That means, if the first job in the queue takes unusually long, then this blocks other processes from getting reaped and new processes from being started. Instead, don't block only one one jobs, but poll them in turn for a short amount of time. Whichever process exits first will be completed and more jobs will be started. In fact, in the current setup it's hard to notice any difference, because all nmcli invocations take about the same time and are relatively fast. That this approach parallelizes better can be seen when the runtime of jobs varies stronger (and some invocations take a notably longer time). As we later want to run nmcli under valgrind, this probably will make a difference. An alternative would be not to poll()/wait() for child processes, but somehow get notified. For example, we could use a GMainContext and watch child processes. But that's probably more complicated to do, so let's keep the naive approach with polling.
2019-10-12 11:02:21 +02:00
conn = self._conn
p = self._p
self._nmobj = None
self._nmiface = None
self._conn = None
self._p = None
clients/tests: don't wait for first job before scheduling parallel jobs Previously, the test would kick off 15 processes in parallel, but the first job in the queue would block more processes from being started. That is, async_start() would only start 15 processes, but since none of them were reaped before async_wait() was called, no more than 15 jobs were running during the start phase. That is not a real issue, because the start phase is non-blocking and queues all the jobs quickly. It's not really expected that during that time many processes already completed. Anyway, this was a bit ugly. The bigger problem is that async_wait() would always block for the first job to complete, before starting more processes. That means, if the first job in the queue takes unusually long, then this blocks other processes from getting reaped and new processes from being started. Instead, don't block only one one jobs, but poll them in turn for a short amount of time. Whichever process exits first will be completed and more jobs will be started. In fact, in the current setup it's hard to notice any difference, because all nmcli invocations take about the same time and are relatively fast. That this approach parallelizes better can be seen when the runtime of jobs varies stronger (and some invocations take a notably longer time). As we later want to run nmcli under valgrind, this probably will make a difference. An alternative would be not to poll()/wait() for child processes, but somehow get notified. For example, we could use a GMainContext and watch child processes. But that's probably more complicated to do, so let's keep the naive approach with polling.
2019-10-12 11:02:21 +02:00
p.stdin.close()
p.kill()
if Util.popen_wait(p, 1) is None:
raise Exception("Stub service did not exit in time")
if self._conn_get_main_object(conn) is not None:
raise Exception(
"Stub service is not still here although it should shut down"
)
class _MethodProxy:
def __init__(self, parent, method_name):
self._parent = parent
self._method_name = method_name
def __call__(self, *args, **kwargs):
dbus_iface = kwargs.pop("dbus_iface", None)
if dbus_iface is None:
dbus_iface = self._parent._nmiface
method = dbus_iface.get_dbus_method(self._method_name)
if kwargs:
# for convenience, we allow the caller to specify arguments
# as kwargs. In this case, we construct a a{sv} array as last argument.
args = list(args)
args.append(kwargs)
return method(*args)
def __getattr__(self, member):
if not member.startswith("op_"):
raise AttributeError(member)
return self._MethodProxy(self, member[3:])
def addConnection(self, connection, do_verify_strict=True):
    """Add *connection* via the stub service's AddConnection D-Bus call.

    With do_verify_strict the service validates the profile strictly.
    """
    return self.op_AddConnection(connection, do_verify_strict)
def findConnections(self, **kwargs):
    """List connections known to the stub service.

    Keyword arguments are forwarded as match criteria (e.g. con_id=...);
    without criteria, an empty a{sv} dict is passed to list everything.
    Returns a list of 3-tuples of strings.
    """
    if not kwargs:
        raw = self.op_FindConnections({})
    else:
        raw = self.op_FindConnections(**kwargs)
    return [(str(e[0]), str(e[1]), str(e[2])) for e in raw]
def findConnectionUuid(self, con_id, required=True):
    """Look up the UUID of the single connection named *con_id*.

    Returns None when the connection is missing (or ambiguous) and
    *required* is false; otherwise such a failure raises AssertionError.
    """
    try:
        uuid = Util.iter_single(self.findConnections(con_id=con_id))[1]
        assert uuid, "Invalid uuid %s" % (uuid)
    except Exception as e:
        if required:
            raise AssertionError(
                "Unexpectedly not found connection %s: %s" % (con_id, str(e))
            )
        return None
    return uuid
def setProperty(self, path, propname, value, iface_name=None):
    """Set a D-Bus property on *path* via the stub service.

    An omitted *iface_name* is sent as the empty string.
    """
    iface = iface_name if iface_name is not None else ""
    self.op_SetProperties([(path, [(iface, [(propname, value)])])])
###############################################################################
class AsyncProcess:
    """A child process (an nmcli invocation) that is spawned lazily and
    polled for completion, so that many such processes can run in
    parallel.

    When the process has terminated, ``complete_cb(self, returncode,
    stdout, stderr)`` is invoked from wait_and_complete(); an instance is
    single-use and must not be polled after completion.
    """

    def __init__(self, args, env, complete_cb, max_waittime_msec=20000):
        # args: the full argv of the child process.
        # env: the environment dict for the child.
        # complete_cb: invoked once with (self, returncode, stdout, stderr).
        # max_waittime_msec: overall timeout, measured from start().
        self._args = list(args)
        self._env = env
        self._complete_cb = complete_cb
        self._max_waittime_msec = max_waittime_msec

    def start(self):
        # Idempotent: the process is only spawned on the first call.
        if not hasattr(self, "_p"):
            self._p_start_timestamp = NM.utils_get_timestamp_msec()
            # Output drained during poll() calls, prepended to the final
            # read in wait_and_complete().
            self._p_stdout_buf = b""
            self._p_stderr_buf = b""
            self._p = subprocess.Popen(
                self._args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=self._env,
            )
            # Make the pipes non-blocking so poll() can drain whatever is
            # pending without stalling.
            Util.buffer_set_nonblock(self._p.stdout)
            Util.buffer_set_nonblock(self._p.stderr)

    def _timeout_remaining_time(self):
        # note that we call this during poll() and wait_and_complete().
        # we don't know the exact time when the process terminated,
        # so this is only approximately correct, if we call poll/wait
        # frequently.
        # Worst case, we will think that the process did not time out,
        # when in fact it was running longer than max-waittime.
        return self._max_waittime_msec - (
            NM.utils_get_timestamp_msec() - self._p_start_timestamp
        )

    def poll(self, timeout=0):
        # Wait up to `timeout` (seconds, see the /1000 conversion in
        # wait_and_complete()) for the process to exit, draining pending
        # output. Returns the exit code, or None if still running.
        self.start()

        (return_code, b_stdout, b_stderr) = Util.popen_wait_read(self._p, timeout)
        self._p_stdout_buf += b_stdout
        self._p_stderr_buf += b_stderr

        # Raise if the process has exceeded its overall max-waittime.
        if return_code is None and self._timeout_remaining_time() <= 0:
            raise Exception(
                "process is still running after timeout: %s" % (" ".join(self._args))
            )

        return return_code

    def wait_and_complete(self):
        # Block until the process exits (or the remaining overall timeout
        # expires), then invoke the completion callback exactly once.
        self.start()

        # Consume self._p: this instance must not be polled afterwards.
        p = self._p
        self._p = None

        (return_code, b_stdout, b_stderr) = Util.popen_wait_read(
            p, max(0, self._timeout_remaining_time()) / 1000
        )

        # Read whatever remains in the pipes after process exit.
        (stdout, stderr) = (p.stdout.read(), p.stderr.read())
        p.stdout.close()
        p.stderr.close()

        # Combine output drained during earlier poll() calls with the
        # output read just now.
        stdout = self._p_stdout_buf + b_stdout + stdout
        stderr = self._p_stderr_buf + b_stderr + stderr
        del self._p_stdout_buf
        del self._p_stderr_buf

        if return_code is None:
            # Timed out: dump the collected output for diagnosis.
            print(stdout)
            print(stderr)
            raise Exception(
                "process did not complete in time: %s" % (" ".join(self._args))
            )

        self._complete_cb(self, return_code, stdout, stderr)
###############################################################################
class NmTestBase(unittest.TestCase):
    """Common base class for the nmcli test cases.

    Holds the per-testcase bookkeeping: the stub service handle, the
    queue of pending async nmcli jobs and the collected results.
    """

    def __init__(self, *args, **kwargs):
        # The NM stub service handle; populated later (initially unset).
        self.srv = None
        # Completed result records, kept in scheduling order.
        self._results = []
        # AsyncProcess jobs that were scheduled but not yet completed.
        self._async_jobs = []
        # Names of tests whose localized output differed and which are
        # therefore to be skipped rather than failed.
        self._skip_test_for_l10n_diff = []
        # Per calling function: how many nmcli invocations it made
        # (used to number the expected-output records).
        self._calling_num = {}
        return unittest.TestCase.__init__(self, *args, **kwargs)
clients/tests: don't wait for first job before scheduling parallel jobs Previously, the test would kick off 15 processes in parallel, but the first job in the queue would block more processes from being started. That is, async_start() would only start 15 processes, but since none of them were reaped before async_wait() was called, no more than 15 jobs were running during the start phase. That is not a real issue, because the start phase is non-blocking and queues all the jobs quickly. It's not really expected that during that time many processes already completed. Anyway, this was a bit ugly. The bigger problem is that async_wait() would always block for the first job to complete, before starting more processes. That means, if the first job in the queue takes unusually long, then this blocks other processes from getting reaped and new processes from being started. Instead, don't block only one one jobs, but poll them in turn for a short amount of time. Whichever process exits first will be completed and more jobs will be started. In fact, in the current setup it's hard to notice any difference, because all nmcli invocations take about the same time and are relatively fast. That this approach parallelizes better can be seen when the runtime of jobs varies stronger (and some invocations take a notably longer time). As we later want to run nmcli under valgrind, this probably will make a difference. An alternative would be not to poll()/wait() for child processes, but somehow get notified. For example, we could use a GMainContext and watch child processes. But that's probably more complicated to do, so let's keep the naive approach with polling.
2019-10-12 11:02:21 +02:00
MAX_JOBS = 15
class TestNmcli(NmTestBase):
def ReplaceTextConUuid(self, con_name, replacement):
    """Build a replacement rule that maps the UUID of connection
    *con_name* to *replacement* in recorded output.

    The UUID lookup against the stub service is memoized and deferred
    until the rule is first applied.
    """
    get_uuid = Util.memoize_nullary(lambda: self.srv.findConnectionUuid(con_name))
    return Util.ReplaceTextSimple(get_uuid, replacement)
@staticmethod
def _read_expected(filename):
    """Read a .expected file and split it into per-test records.

    The file is a concatenation of records, each introduced by a
    "size: <n>\\n" header followed by <n> bytes of payload.

    Returns (content, results): the raw file content (bytes) and the
    list of records (each as bytes, including its "size:" header).
    If the content does not parse as a well-formed sequence of such
    records, results is None.
    """
    results_expect = []

    content_expect = Util.file_read(filename)
    try:
        base_idx = 0
        size_prefix = "size: ".encode("utf8")
        while True:
            if not content_expect[base_idx : base_idx + 10].startswith(size_prefix):
                raise Exception("Unexpected token")
            j = base_idx + len(size_prefix)
            i = j
            # On Python 3, indexing bytes yields an int; on Python 2 it
            # yields a one-character str.
            if Util.python_has_version(3, 0):
                eol = ord("\n")
            else:
                eol = "\n"
            # Scan to the end of the "size:" header line.
            while content_expect[i] != eol:
                i += 1
            # A record spans the header line plus the advertised number
            # of payload bytes.
            i = i + 1 + int(content_expect[j:i])
            results_expect.append(content_expect[base_idx:i])
            if len(content_expect) == i:
                break
            base_idx = i
    except Exception:
        # Malformed or truncated content; signal it with results=None.
        results_expect = None

    return content_expect, results_expect
def call_nmcli_l(
    self,
    args,
    check_on_disk=_DEFAULT_ARG,
    fatal_warnings=_DEFAULT_ARG,
    expected_returncode=_DEFAULT_ARG,
    expected_stdout=_DEFAULT_ARG,
    expected_stderr=_DEFAULT_ARG,
    replace_stdout=None,
    replace_stderr=None,
    replace_cmd=None,
    sort_lines_stdout=False,
    extra_env=None,
    sync_barrier=False,
):
    """Invoke nmcli with *args* once per tested locale ("C" and "pl").

    All remaining parameters are forwarded unchanged to _call_nmcli().
    """
    # Record the caller's frame so the .expected records can name the
    # test's source location.
    caller_frame = sys._getframe(1)
    for lang in ("C", "pl"):
        self._call_nmcli(
            args,
            lang,
            check_on_disk,
            fatal_warnings,
            expected_returncode,
            expected_stdout,
            expected_stderr,
            replace_stdout,
            replace_stderr,
            replace_cmd,
            sort_lines_stdout,
            extra_env,
            sync_barrier,
            caller_frame,
        )
def call_nmcli(
    self,
    args,
    langs=None,
    lang=None,
    check_on_disk=_DEFAULT_ARG,
    fatal_warnings=_DEFAULT_ARG,
    expected_returncode=_DEFAULT_ARG,
    expected_stdout=_DEFAULT_ARG,
    expected_stderr=_DEFAULT_ARG,
    replace_stdout=None,
    replace_stderr=None,
    replace_cmd=None,
    sort_lines_stdout=False,
    extra_env=None,
    sync_barrier=None,
):
    """Invoke nmcli with *args* in one or several locales.

    Either *lang* (one locale) or *langs* (a list of locales) may be
    given, not both; the default is the "C" locale. When *sync_barrier*
    is left unset, a barrier is implied for a single-locale call.
    """
    caller_frame = sys._getframe(1)

    if langs is None:
        # A single locale, defaulting to "C".
        langs = ["C" if lang is None else lang]
    else:
        assert lang is None

    if sync_barrier is None:
        sync_barrier = len(langs) == 1

    for one_lang in langs:
        self._call_nmcli(
            args,
            one_lang,
            check_on_disk,
            fatal_warnings,
            expected_returncode,
            expected_stdout,
            expected_stderr,
            replace_stdout,
            replace_stderr,
            replace_cmd,
            sort_lines_stdout,
            extra_env,
            sync_barrier,
            caller_frame,
        )
def call_nmcli_pexpect(self, args):
    """Spawn nmcli with *args* under pexpect, for interactive tests.

    Uses the default ("C" locale) test environment and a 5 second
    expect timeout.
    """
    nmcli_path = conf.get(ENV_NM_TEST_CLIENT_NMCLI_PATH)
    return pexpect.spawn(nmcli_path, args, timeout=5, env=self._env())
def _env(
    self, lang="C", calling_num=None, fatal_warnings=_DEFAULT_ARG, extra_env=None
):
    """Build the environment dict for spawning nmcli.

    lang: the locale ("C", "de_DE.utf8" or "pl_PL.UTF-8"); anything
        else fails the test.
    calling_num: if given, exported as NM_TEST_CALLING_NUM.
    fatal_warnings: G_DEBUG=fatal-warnings is set by default and for
        any true value; pass a false (non-default) value to omit it.
    extra_env: additional variables that override the defaults.
    """
    if lang == "C":
        language = ""
    elif lang == "de_DE.utf8":
        language = "de"
    elif lang == "pl_PL.UTF-8":
        language = "pl"
    else:
        self.fail("invalid language %s" % (lang))

    # Start from a clean environment and only propagate selected
    # variables from our own.
    env = {}
    for k in ["LD_LIBRARY_PATH", "DBUS_SESSION_BUS_ADDRESS"]:
        val = os.environ.get(k, None)
        if val is not None:
            env[k] = val
    env["LANG"] = lang
    env["LANGUAGE"] = language
    env["LIBNM_USE_SESSION_BUS"] = "1"
    env["LIBNM_USE_NO_UDEV"] = "1"
    env["TERM"] = "linux"
    env["ASAN_OPTIONS"] = conf.get(ENV_NM_TEST_ASAN_OPTIONS)
    env["LSAN_OPTIONS"] = conf.get(ENV_NM_TEST_LSAN_OPTIONS)
    # Bug fix: this was set as "LBSAN_OPTIONS", a variable the
    # UndefinedBehaviorSanitizer runtime never reads; it must be
    # "UBSAN_OPTIONS" for the configured options to take effect.
    env["UBSAN_OPTIONS"] = conf.get(ENV_NM_TEST_UBSAN_OPTIONS)
    env["XDG_CONFIG_HOME"] = PathConfiguration.srcdir()
    if calling_num is not None:
        env["NM_TEST_CALLING_NUM"] = str(calling_num)
    if fatal_warnings is _DEFAULT_ARG or fatal_warnings:
        env["G_DEBUG"] = "fatal-warnings"
    if extra_env is not None:
        for k, v in extra_env.items():
            env[k] = v
    return env
def _call_nmcli(
self,
args,
lang,
check_on_disk,
fatal_warnings,
expected_returncode,
expected_stdout,
expected_stderr,
replace_stdout,
replace_stderr,
replace_cmd,
sort_lines_stdout,
extra_env,
sync_barrier,
frame,
):
if sync_barrier:
self.async_wait()
calling_fcn = frame.f_code.co_name
calling_num = self._calling_num.get(calling_fcn, 0) + 1
self._calling_num[calling_fcn] = calling_num
test_name = "%s-%03d" % (calling_fcn, calling_num)
# we cannot use frame.f_code.co_filename directly, because it might be different depending
# on where the file lies and which is CWD. We still want to give the location of
# the file, so that the user can easier find the source (when looking at the .expected files)
self.assertTrue(
os.path.abspath(frame.f_code.co_filename).endswith(
"/" + PathConfiguration.canonical_script_filename()
)
)
if conf.get(ENV_NM_TEST_WITH_LINENO):
calling_location = "%s:%d:%s()/%d" % (
PathConfiguration.canonical_script_filename(),
frame.f_lineno,
frame.f_code.co_name,
calling_num,
)
else:
calling_location = "%s:%s()/%d" % (
PathConfiguration.canonical_script_filename(),
frame.f_code.co_name,
calling_num,
)
if lang is None or lang == "C":
lang = "C"
elif lang == "de":
lang = "de_DE.utf8"
elif lang == "pl":
lang = "pl_PL.UTF-8"
else:
self.fail("invalid language %s" % (lang))
args = [conf.get(ENV_NM_TEST_CLIENT_NMCLI_PATH)] + list(args)
if replace_stdout is not None:
replace_stdout = list(replace_stdout)
if replace_stderr is not None:
replace_stderr = list(replace_stderr)
if replace_cmd is not None:
replace_cmd = list(replace_cmd)
if check_on_disk is _DEFAULT_ARG:
check_on_disk = (
expected_returncode is _DEFAULT_ARG
and (
expected_stdout is _DEFAULT_ARG
or expected_stdout is _UNSTABLE_OUTPUT
)
and (
expected_stderr is _DEFAULT_ARG
or expected_stderr is _UNSTABLE_OUTPUT
)
)
if expected_returncode is _DEFAULT_ARG:
expected_returncode = None
if expected_stdout is _DEFAULT_ARG:
expected_stdout = None
if expected_stderr is _DEFAULT_ARG:
expected_stderr = None
clients/tests: don't wait for first job before scheduling parallel jobs Previously, the test would kick off 15 processes in parallel, but the first job in the queue would block more processes from being started. That is, async_start() would only start 15 processes, but since none of them were reaped before async_wait() was called, no more than 15 jobs were running during the start phase. That is not a real issue, because the start phase is non-blocking and queues all the jobs quickly. It's not really expected that during that time many processes already completed. Anyway, this was a bit ugly. The bigger problem is that async_wait() would always block for the first job to complete, before starting more processes. That means, if the first job in the queue takes unusually long, then this blocks other processes from getting reaped and new processes from being started. Instead, don't block only one one jobs, but poll them in turn for a short amount of time. Whichever process exits first will be completed and more jobs will be started. In fact, in the current setup it's hard to notice any difference, because all nmcli invocations take about the same time and are relatively fast. That this approach parallelizes better can be seen when the runtime of jobs varies stronger (and some invocations take a notably longer time). As we later want to run nmcli under valgrind, this probably will make a difference. An alternative would be not to poll()/wait() for child processes, but somehow get notified. For example, we could use a GMainContext and watch child processes. But that's probably more complicated to do, so let's keep the naive approach with polling.
2019-10-12 11:02:21 +02:00
results_idx = len(self._results)
self._results.append(None)
def complete_cb(async_job, returncode, stdout, stderr):
if expected_stdout is _UNSTABLE_OUTPUT:
stdout = "<UNSTABLE OUTPUT>".encode("utf-8")
else:
stdout = Util.replace_text(stdout, replace_stdout)
if expected_stderr is _UNSTABLE_OUTPUT:
stderr = "<UNSTABLE OUTPUT>".encode("utf-8")
else:
stderr = Util.replace_text(stderr, replace_stderr)
if sort_lines_stdout:
stdout = b"\n".join(sorted(stdout.split(b"\n")))
ignore_l10n_diff = lang != "C" and not conf.get(
ENV_NM_TEST_CLIENT_CHECK_L10N
)
if expected_stderr is not None and expected_stderr is not _UNSTABLE_OUTPUT:
if expected_stderr != stderr:
if ignore_l10n_diff:
self._skip_test_for_l10n_diff.append(test_name)
else:
self.assertEqual(expected_stderr, stderr)
if expected_stdout is not None and expected_stdout is not _UNSTABLE_OUTPUT:
if expected_stdout != stdout:
if ignore_l10n_diff:
self._skip_test_for_l10n_diff.append(test_name)
else:
self.assertEqual(expected_stdout, stdout)
if expected_returncode is not None:
self.assertEqual(expected_returncode, returncode)
if fatal_warnings is _DEFAULT_ARG:
if expected_returncode != -5:
self.assertNotEqual(returncode, -5)
elif fatal_warnings:
if expected_returncode is None:
self.assertEqual(returncode, -5)
if check_on_disk:
cmd = "$NMCLI %s" % (" ".join([Util.quote(a) for a in args[1:]]))
cmd = Util.replace_text(cmd, replace_cmd)
if returncode < 0:
returncode_str = "%d (SIGNAL %s)" % (
returncode,
Util.signal_no_to_str(-returncode),
)
else:
returncode_str = "%d" % (returncode)
content = (
("location: %s\n" % (calling_location)).encode("utf8")
+ ("cmd: %s\n" % (cmd)).encode("utf8")
+ ("lang: %s\n" % (lang)).encode("utf8")
+ ("returncode: %s\n" % (returncode_str)).encode("utf8")
)
if len(stdout) > 0:
content += (
("stdout: %d bytes\n>>>\n" % (len(stdout))).encode("utf8")
+ stdout
+ "\n<<<\n".encode("utf8")
)
if len(stderr) > 0:
content += (
("stderr: %d bytes\n>>>\n" % (len(stderr))).encode("utf8")
+ stderr
+ "\n<<<\n".encode("utf8")
)
content = ("size: %s\n" % (len(content))).encode("utf8") + content
self._results[results_idx] = {
"test_name": test_name,
"ignore_l10n_diff": ignore_l10n_diff,
"content": content,
}
env = self._env(lang, calling_num, fatal_warnings, extra_env)
async_job = AsyncProcess(args=args, env=env, complete_cb=complete_cb)
self._async_jobs.append(async_job)
self.async_start(wait_all=sync_barrier)
def async_start(self, wait_all=False):
clients/tests: don't wait for first job before scheduling parallel jobs Previously, the test would kick off 15 processes in parallel, but the first job in the queue would block more processes from being started. That is, async_start() would only start 15 processes, but since none of them were reaped before async_wait() was called, no more than 15 jobs were running during the start phase. That is not a real issue, because the start phase is non-blocking and queues all the jobs quickly. It's not really expected that during that time many processes already completed. Anyway, this was a bit ugly. The bigger problem is that async_wait() would always block for the first job to complete, before starting more processes. That means, if the first job in the queue takes unusually long, then this blocks other processes from getting reaped and new processes from being started. Instead, don't block only one one jobs, but poll them in turn for a short amount of time. Whichever process exits first will be completed and more jobs will be started. In fact, in the current setup it's hard to notice any difference, because all nmcli invocations take about the same time and are relatively fast. That this approach parallelizes better can be seen when the runtime of jobs varies stronger (and some invocations take a notably longer time). As we later want to run nmcli under valgrind, this probably will make a difference. An alternative would be not to poll()/wait() for child processes, but somehow get notified. For example, we could use a GMainContext and watch child processes. But that's probably more complicated to do, so let's keep the naive approach with polling.
2019-10-12 11:02:21 +02:00
while True:
while True:
for async_job in list(self._async_jobs[0:MAX_JOBS]):
async_job.start()
# start up to MAX_JOBS jobs, but poll() and complete those
# that are already exited. Retry, until there are no more
# jobs to start, or until MAX_JOBS are running.
jobs_running = []
for async_job in list(self._async_jobs[0:MAX_JOBS]):
if async_job.poll() is not None:
self._async_jobs.remove(async_job)
async_job.wait_and_complete()
continue
jobs_running.append(async_job)
if len(jobs_running) >= len(self._async_jobs):
break
if len(jobs_running) >= MAX_JOBS:
break
if not jobs_running:
return
if not wait_all:
return
# in a loop, indefinitely poll the running jobs until we find one that
# completes. Note that poll() itself will raise an exception if a
# jobs times out.
for async_job in Util.random_job(jobs_running):
if async_job.poll(timeout=0.03) is not None:
clients/tests: don't wait for first job before scheduling parallel jobs Previously, the test would kick off 15 processes in parallel, but the first job in the queue would block more processes from being started. That is, async_start() would only start 15 processes, but since none of them were reaped before async_wait() was called, no more than 15 jobs were running during the start phase. That is not a real issue, because the start phase is non-blocking and queues all the jobs quickly. It's not really expected that during that time many processes already completed. Anyway, this was a bit ugly. The bigger problem is that async_wait() would always block for the first job to complete, before starting more processes. That means, if the first job in the queue takes unusually long, then this blocks other processes from getting reaped and new processes from being started. Instead, don't block only one one jobs, but poll them in turn for a short amount of time. Whichever process exits first will be completed and more jobs will be started. In fact, in the current setup it's hard to notice any difference, because all nmcli invocations take about the same time and are relatively fast. That this approach parallelizes better can be seen when the runtime of jobs varies stronger (and some invocations take a notably longer time). As we later want to run nmcli under valgrind, this probably will make a difference. An alternative would be not to poll()/wait() for child processes, but somehow get notified. For example, we could use a GMainContext and watch child processes. But that's probably more complicated to do, so let's keep the naive approach with polling.
2019-10-12 11:02:21 +02:00
self._async_jobs.remove(async_job)
async_job.wait_and_complete()
break
def async_wait(self):
return self.async_start(wait_all=True)
def _nm_test_post(self):
self.async_wait()
if self.srv is not None:
self.srv.shutdown()
self.srv = None
self._calling_num = None
results = self._results
self._results = None
if len(results) == 0:
return
skip_test_for_l10n_diff = self._skip_test_for_l10n_diff
self._skip_test_for_l10n_diff = None
test_name = self._testMethodName
filename = os.path.abspath(
PathConfiguration.srcdir()
+ "/test-client.check-on-disk/"
+ test_name
+ ".expected"
)
regenerate = conf.get(ENV_NM_TEST_REGENERATE)
content_expect, results_expect = self._read_expected(filename)
if results_expect is None:
if not regenerate:
self.fail(
"Failed to parse expected file '%s'. Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
% (filename)
)
else:
for i in range(0, min(len(results_expect), len(results))):
n = results[i]
if results_expect[i] == n["content"]:
continue
if regenerate:
continue
if n["ignore_l10n_diff"]:
skip_test_for_l10n_diff.append(n["test_name"])
continue
print(
"\n\n\nThe file '%s' does not have the expected content:"
% (filename)
)
print("ACTUAL OUTPUT:\n[[%s]]\n" % (n["content"]))
2018-10-31 11:29:26 +01:00
print("EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[i]))
print(
"Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
)
print(
"See howto in %s for details.\n"
% (PathConfiguration.canonical_script_filename())
)
sys.stdout.flush()
self.fail(
"Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
% (filename)
)
if len(results_expect) != len(results):
if not regenerate:
print(
"\n\n\nThe number of tests in %s does not match the expected content (%s vs %s):"
% (filename, len(results_expect), len(results))
)
if len(results_expect) < len(results):
print(
"ACTUAL OUTPUT:\n[[%s]]\n"
% (results[len(results_expect)]["content"])
)
else:
print(
"EXPECT OUTPUT:\n[[%s]]\n" % (results_expect[len(results)])
)
print(
"Let the test write the file by rerunning with NM_TEST_REGENERATE=1"
)
print(
"See howto in %s for details.\n"
% (PathConfiguration.canonical_script_filename())
)
sys.stdout.flush()
self.fail(
"Unexpected output of command, expected %s. Rerun test with NM_TEST_REGENERATE=1 to regenerate files"
% (filename)
)
if regenerate:
content_new = b"".join([r["content"] for r in results])
if content_new != content_expect:
try:
with open(filename, "wb") as content_file:
content_file.write(content_new)
except Exception as e:
self.fail("Failure to write '%s': %s" % (filename, e))
if skip_test_for_l10n_diff:
# nmcli loads translations from the installation path. This failure commonly
# happens because you did not install the binary in the --prefix, before
# running the test. Hence, translations are not available or differ.
self.skipTest(
"Skipped asserting for localized tests %s. Set NM_TEST_CLIENT_CHECK_L10N=1 to force fail."
% (",".join(skip_test_for_l10n_diff))
)
def skip_without_pexpect(func):
def f(self):
if pexpect is None:
raise unittest.SkipTest("pexpect not available")
func(self)
return f
def nm_test(func):
def f(self):
self.srv = NMStubServer(self._testMethodName)
func(self)
self._nm_test_post()
return f
def nm_test_no_dbus(func):
def f(self):
func(self)
self._nm_test_post()
return f
def setUp(self):
if not dbus_session_inited:
self.skipTest(
"Own D-Bus session for testing is not initialized. Do you have dbus-run-session available?"
)
if NM is None:
self.skipTest("gi.NM is not available. Did you build with introspection?")
def init_001(self):
self.srv.op_AddObj("WiredDevice", iface="eth0")
self.srv.op_AddObj("WiredDevice", iface="eth1")
self.srv.op_AddObj("WifiDevice", iface="wlan0")
self.srv.op_AddObj("WifiDevice", iface="wlan1")
# add another device with an identical ifname. The D-Bus API itself
# does not enforce the ifnames are unique.
self.srv.op_AddObj("WifiDevice", ident="wlan1/x", iface="wlan1")
self.srv.op_AddObj("WifiAp", device="wlan0", rsnf=0x0)
self.srv.op_AddObj("WifiAp", device="wlan0")
NM_AP_FLAGS = getattr(NM, "80211ApSecurityFlags")
rsnf = 0x0
rsnf = rsnf | NM_AP_FLAGS.PAIR_TKIP
rsnf = rsnf | NM_AP_FLAGS.PAIR_CCMP
rsnf = rsnf | NM_AP_FLAGS.GROUP_TKIP
rsnf = rsnf | NM_AP_FLAGS.GROUP_CCMP
rsnf = rsnf | NM_AP_FLAGS.KEY_MGMT_SAE
self.srv.op_AddObj("WifiAp", device="wlan0", wpaf=0x0, rsnf=rsnf)
self.srv.op_AddObj("WifiAp", device="wlan1")
self.srv.addConnection(
{"connection": {"type": "802-3-ethernet", "id": "con-1"}}
)
@nm_test
def test_001(self):
self.call_nmcli_l([])
self.call_nmcli_l(
["-f", "AP", "-mode", "multiline", "-p", "d", "show", "wlan0"]
)
self.call_nmcli_l(["c", "s"])
self.call_nmcli_l(["bogus", "s"])
for mode in Util.iter_nmcli_output_modes():
self.call_nmcli_l(mode + ["general", "permissions"])
@nm_test
def test_002(self):
self.init_001()
self.call_nmcli_l(["d"])
self.call_nmcli_l(["-f", "all", "d"])
self.call_nmcli_l([])
self.call_nmcli_l(["-f", "AP", "-mode", "multiline", "d", "show", "wlan0"])
self.call_nmcli_l(
["-f", "AP", "-mode", "multiline", "-p", "d", "show", "wlan0"]
)
self.call_nmcli_l(
["-f", "AP", "-mode", "multiline", "-t", "d", "show", "wlan0"]
)
self.call_nmcli_l(["-f", "AP", "-mode", "tabular", "d", "show", "wlan0"])
self.call_nmcli_l(["-f", "AP", "-mode", "tabular", "-p", "d", "show", "wlan0"])
self.call_nmcli_l(["-f", "AP", "-mode", "tabular", "-t", "d", "show", "wlan0"])
self.call_nmcli_l(["-f", "ALL", "d", "wifi"])
self.call_nmcli_l(["c"])
self.call_nmcli_l(["c", "s", "con-1"])
    @nm_test
    def test_003(self):
        """Create ethernet and GSM profiles (including empty/odd APN values),
        activate one profile on multiple devices, then snapshot the
        `nmcli con`/`nmcli dev` output in all output modes -- also after
        marking the active connection DEACTIVATING and hiding the profile."""
        con_gsm_list = [
            ("con-gsm1", "xyz.con-gsm1"),
            ("con-gsm2", ""),
            ("con-gsm3", " "),
        ]
        self.init_001()
        replace_uuids = []
        replace_uuids.append(
            self.ReplaceTextConUuid("con-xx1", "UUID-con-xx1-REPLACED-REPLACED-REPLA")
        )
        self.call_nmcli(
            ["c", "add", "type", "ethernet", "ifname", "*", "con-name", "con-xx1"],
            replace_stdout=replace_uuids,
        )
        self.call_nmcli_l(["c", "s"], replace_stdout=replace_uuids)
        for con_name, apn in con_gsm_list:
            replace_uuids.append(
                self.ReplaceTextConUuid(
                    con_name, "UUID-" + con_name + "-REPLACED-REPLACED-REPL"
                )
            )
            self.call_nmcli(
                [
                    "connection",
                    "add",
                    "type",
                    "gsm",
                    "autoconnect",
                    "no",
                    "con-name",
                    con_name,
                    "ifname",
                    "*",
                    "apn",
                    apn,
                    "serial.baud",
                    "5",
                    "serial.send-delay",
                    "100",
                    "serial.pari",
                    "1",
                    "ipv4.dns-options",
                    " ",
                ],
                replace_stdout=replace_uuids,
            )
        replace_uuids.append(
            self.ReplaceTextConUuid("ethernet", "UUID-ethernet-REPLACED-REPLACED-REPL")
        )
        self.call_nmcli(
            ["c", "add", "type", "ethernet", "ifname", "*"],
            replace_stdout=replace_uuids,
        )
        self.call_nmcli_l(["c", "s"], replace_stdout=replace_uuids)
        self.call_nmcli_l(["-f", "ALL", "c", "s"], replace_stdout=replace_uuids)
        self.call_nmcli_l(
            ["--complete-args", "-f", "ALL", "c", "s", ""],
            replace_stdout=replace_uuids,
            sort_lines_stdout=True,
        )
        for con_name, apn in con_gsm_list:
            self.call_nmcli_l(["con", "s", con_name], replace_stdout=replace_uuids)
            self.call_nmcli_l(
                ["-g", "all", "con", "s", con_name], replace_stdout=replace_uuids
            )
        # activate the same profile on multiple devices. Our stub-implementation
        # is fine with that... although NetworkManager service would reject
        # such a configuration by deactivating the profile first. But note that
        # that is only an internal behavior of NetworkManager service. The D-Bus
        # API perfectly allows for one profile to be active multiple times. Also
        # note, that there is always a short time where one profile goes down,
        # while another is activating. Hence, while real NetworkManager commonly
        # does not allow that multiple profiles *stay* connected at the same
        # time, there is always the possibility that a profile is activating/active
        # on a device, while also activating/deactivating in parallel.
        for dev in ["eth0", "eth1"]:
            self.call_nmcli(["con", "up", "ethernet", "ifname", dev])
            self.call_nmcli_l(["con"], replace_stdout=replace_uuids)
        self.call_nmcli_l(["-f", "ALL", "con"], replace_stdout=replace_uuids)
        self.call_nmcli_l(
            ["-f", "ALL", "con", "s", "-a"], replace_stdout=replace_uuids
        )
        self.call_nmcli_l(
            ["-f", "ACTIVE-PATH,DEVICE,UUID", "con", "s", "-act"],
            replace_stdout=replace_uuids,
        )
        self.call_nmcli_l(
            ["-f", "UUID,NAME", "con", "s", "--active"],
            replace_stdout=replace_uuids,
        )
        self.call_nmcli_l(
            ["-f", "ALL", "con", "s", "ethernet"], replace_stdout=replace_uuids
        )
        self.call_nmcli_l(
            ["-f", "GENERAL.STATE", "con", "s", "ethernet"],
            replace_stdout=replace_uuids,
        )
        self.call_nmcli_l(["con", "s", "ethernet"], replace_stdout=replace_uuids)
        self.call_nmcli_l(
            ["-f", "ALL", "dev", "status"], replace_stdout=replace_uuids
        )
        # test invalid call ('s' abbreviates 'status' and not 'show')
        self.call_nmcli_l(
            ["-f", "ALL", "dev", "s", "eth0"], replace_stdout=replace_uuids
        )
        self.call_nmcli_l(
            ["-f", "ALL", "dev", "show", "eth0"], replace_stdout=replace_uuids
        )
        self.call_nmcli_l(
            ["-f", "ALL", "-t", "dev", "show", "eth0"], replace_stdout=replace_uuids
        )
        self.async_wait()
        # flip the first active connection to DEACTIVATING and re-snapshot.
        self.srv.setProperty(
            "/org/freedesktop/NetworkManager/ActiveConnection/1",
            "State",
            dbus.UInt32(NM.ActiveConnectionState.DEACTIVATING),
        )
        self.call_nmcli_l([], replace_stdout=replace_uuids)
        # second pass (i == 1): hide the "ethernet" profile and repeat.
        for i in [0, 1]:
            if i == 1:
                self.async_wait()
                self.srv.op_ConnectionSetVisible(False, con_id="ethernet")
            for mode in Util.iter_nmcli_output_modes():
                self.call_nmcli_l(
                    mode + ["-f", "ALL", "con"], replace_stdout=replace_uuids
                )
                self.call_nmcli_l(
                    mode + ["-f", "UUID,TYPE", "con"], replace_stdout=replace_uuids
                )
                self.call_nmcli_l(
                    mode + ["con", "s", "ethernet"], replace_stdout=replace_uuids
                )
                self.call_nmcli_l(
                    mode
                    + ["c", "s", "/org/freedesktop/NetworkManager/ActiveConnection/1"],
                    replace_stdout=replace_uuids,
                )
                self.call_nmcli_l(
                    mode + ["-f", "all", "dev", "show", "eth0"],
                    replace_stdout=replace_uuids,
                )
    @nm_test
    def test_004(self):
        """Create a wifi profile and a VPN profile, modify various properties
        (including one invalid setting name and one localized invocation),
        activate both, then snapshot connection/device/wifi output in all
        output modes."""
        self.init_001()
        replace_uuids = []
        replace_uuids.append(
            self.ReplaceTextConUuid("con-xx1", "UUID-con-xx1-REPLACED-REPLACED-REPLA")
        )
        self.call_nmcli(
            [
                "c",
                "add",
                "type",
                "wifi",
                "ifname",
                "*",
                "ssid",
                "foobar",
                "con-name",
                "con-xx1",
            ],
            replace_stdout=replace_uuids,
        )
        # "ip.gateway" is not a valid setting name; exercises error output.
        self.call_nmcli(["connection", "mod", "con-xx1", "ip.gateway", ""])
        # lang="pl" exercises the localized error/success message path.
        self.call_nmcli(
            ["connection", "mod", "con-xx1", "ipv4.gateway", "172.16.0.1"], lang="pl"
        )
        self.call_nmcli(["connection", "mod", "con-xx1", "ipv6.gateway", "::99"])
        self.call_nmcli(["connection", "mod", "con-xx1", "802.abc", ""])
        self.call_nmcli(["connection", "mod", "con-xx1", "802-11-wireless.band", "a"])
        self.call_nmcli(
            [
                "connection",
                "mod",
                "con-xx1",
                "ipv4.addresses",
                "192.168.77.5/24",
                "ipv4.routes",
                "2.3.4.5/32 192.168.77.1",
                "ipv6.addresses",
                "1:2:3:4::6/64",
                "ipv6.routes",
                "1:2:3:4:5:6::5/128",
            ]
        )
        self.call_nmcli_l(["con", "s", "con-xx1"], replace_stdout=replace_uuids)
        self.async_wait()
        replace_uuids.append(
            self.ReplaceTextConUuid("con-vpn-1", "UUID-con-vpn-1-REPLACED-REPLACED-REP")
        )
        self.call_nmcli(
            [
                "connection",
                "add",
                "type",
                "vpn",
                "con-name",
                "con-vpn-1",
                "ifname",
                "*",
                "vpn-type",
                "openvpn",
                "vpn.data",
                "key1 = val1, key2 = val2, key3=val3",
            ],
            replace_stdout=replace_uuids,
        )
        self.call_nmcli_l(["con", "s"], replace_stdout=replace_uuids)
        self.call_nmcli_l(["con", "s", "con-vpn-1"], replace_stdout=replace_uuids)
        self.call_nmcli(["con", "up", "con-xx1"])
        self.call_nmcli_l(["con", "s"], replace_stdout=replace_uuids)
        self.call_nmcli(["con", "up", "con-vpn-1"])
        self.call_nmcli_l(["con", "s"], replace_stdout=replace_uuids)
        self.call_nmcli_l(["con", "s", "con-vpn-1"], replace_stdout=replace_uuids)
        self.async_wait()
        # mark the VPN active-connection as fully ACTIVATED before snapshotting.
        self.srv.setProperty(
            "/org/freedesktop/NetworkManager/ActiveConnection/2",
            "VpnState",
            dbus.UInt32(NM.VpnConnectionState.ACTIVATED),
        )
        uuids = Util.replace_text_sort_list(
            [c[1] for c in self.srv.findConnections()], replace_uuids
        )
        self.call_nmcli_l([], replace_stdout=replace_uuids)
        for mode in Util.iter_nmcli_output_modes():
            self.call_nmcli_l(
                mode + ["con", "s", "con-vpn-1"], replace_stdout=replace_uuids
            )
            # NOTE(review): this repeats the previous call verbatim --
            # presumably intentional (checks repeated invocations are stable);
            # confirm before removing.
            self.call_nmcli_l(
                mode + ["con", "s", "con-vpn-1"], replace_stdout=replace_uuids
            )
            self.call_nmcli_l(
                mode + ["-f", "ALL", "con", "s", "con-vpn-1"],
                replace_stdout=replace_uuids,
            )
            # This only filters 'vpn' settings from the connection profile.
            # Contrary to '-f GENERAL' below, it does not show the properties of
            # the activated VPN connection. This is a nmcli bug.
            self.call_nmcli_l(
                mode + ["-f", "VPN", "con", "s", "con-vpn-1"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "GENERAL", "con", "s", "con-vpn-1"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(mode + ["dev", "s"], replace_stdout=replace_uuids)
            self.call_nmcli_l(
                mode + ["-f", "all", "dev", "status"], replace_stdout=replace_uuids
            )
            self.call_nmcli_l(mode + ["dev", "show"], replace_stdout=replace_uuids)
            self.call_nmcli_l(
                mode + ["-f", "all", "dev", "show"], replace_stdout=replace_uuids
            )
            self.call_nmcli_l(
                mode + ["dev", "show", "wlan0"], replace_stdout=replace_uuids
            )
            self.call_nmcli_l(
                mode + ["-f", "all", "dev", "show", "wlan0"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "GENERAL,GENERAL.HWADDR,WIFI-PROPERTIES",
                    "dev",
                    "show",
                    "wlan0",
                ],
                replace_stdout=replace_uuids,
            )
            # NOTE(review): duplicated call again -- see note above.
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "GENERAL,GENERAL.HWADDR,WIFI-PROPERTIES",
                    "dev",
                    "show",
                    "wlan0",
                ],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "DEVICE,TYPE,DBUS-PATH", "dev"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "ALL", "device", "wifi", "list"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "COMMON", "device", "wifi", "list"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "NAME,SSID,SSID-HEX,BSSID,MODE,CHAN,FREQ,RATE,SIGNAL,BARS,SECURITY,WPA-FLAGS,RSN-FLAGS,DEVICE,ACTIVE,IN-USE,DBUS-PATH",
                    "device",
                    "wifi",
                    "list",
                ],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + ["-f", "ALL", "device", "wifi", "list", "bssid", "C0:E2:BE:E8:EF:B6"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "COMMON",
                    "device",
                    "wifi",
                    "list",
                    "bssid",
                    "C0:E2:BE:E8:EF:B6",
                ],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "NAME,SSID,SSID-HEX,BSSID,MODE,CHAN,FREQ,RATE,SIGNAL,BARS,SECURITY,WPA-FLAGS,RSN-FLAGS,DEVICE,ACTIVE,IN-USE,DBUS-PATH",
                    "device",
                    "wifi",
                    "list",
                    "bssid",
                    "C0:E2:BE:E8:EF:B6",
                ],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "ALL", "device", "show", "wlan0"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["-f", "COMMON", "device", "show", "wlan0"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "GENERAL,CAPABILITIES,WIFI-PROPERTIES,AP,WIRED-PROPERTIES,WIMAX-PROPERTIES,NSP,IP4,DHCP4,IP6,DHCP6,BOND,TEAM,BRIDGE,VLAN,BLUETOOTH,CONNECTIONS",
                    "device",
                    "show",
                    "wlan0",
                ],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode + ["dev", "lldp", "list", "ifname", "eth0"],
                replace_stdout=replace_uuids,
            )
            self.call_nmcli_l(
                mode
                + [
                    "-f",
                    "connection.id,connection.uuid,connection.type,connection.interface-name,802-3-ethernet.mac-address,vpn.user-name",
                    "connection",
                    "show",
                ]
                + uuids,
                replace_stdout=replace_uuids,
                replace_cmd=replace_uuids,
            )
    @nm_test_no_dbus
    def test_offline(self):
        """Exercise `nmcli --offline`, which must work without any D-Bus
        connection; the bus addresses are deliberately set to invalid
        values so any accidental D-Bus use fails loudly."""
        # Make sure we're not using D-Bus
        no_dbus_env = {
            "DBUS_SYSTEM_BUS_ADDRESS": "very:invalid",
            "DBUS_SESSION_BUS_ADDRESS": "very:invalid",
        }
        # This check just makes sure the above works and the
        # "nmcli g" command indeed fails talking to D-Bus
        self.call_nmcli(
            ["g"],
            extra_env=no_dbus_env,
            replace_stderr=[
                Util.ReplaceTextRegex(
                    # depending on glib version, it prints `%s', '%s', or “%s”.
                    # depending on libc version, it converts unicode to ? or *.
                    r"Key/Value pair 0, [`*?']invalid[*?'], in address element [`*?']very:invalid[*?'] does not contain an equal sign",
                    "Key/Value pair 0, 'invalid', in address element 'very:invalid' does not contain an equal sign",
                )
            ],
        )
        # generated UUIDs are random; normalize them for stable output.
        replace_uuids = [
            Util.ReplaceTextRegex(
                r"\buuid=[-a-f0-9]+\b", "uuid=UUID-WAS-HERE-BUT-IS-NO-MORE-SADLY"
            )
        ]
        self.call_nmcli(
            ["--offline", "c", "add", "type", "ethernet"],
            extra_env=no_dbus_env,
            replace_stdout=replace_uuids,
        )
        self.call_nmcli(
            ["--offline", "c", "show"],
            extra_env=no_dbus_env,
        )
        self.call_nmcli(
            ["--offline", "g"],
            extra_env=no_dbus_env,
        )
        self.call_nmcli(
            ["--offline"],
            extra_env=no_dbus_env,
        )
        self.call_nmcli(
            [
                "--offline",
                "c",
                "add",
                "type",
                "wifi",
                "ssid",
                "lala",
                "802-1x.eap",
                "pwd",
                "802-1x.identity",
                "foo",
                "802-1x.password",
                "bar",
            ],
            extra_env=no_dbus_env,
            replace_stdout=replace_uuids,
        )
        self.call_nmcli(
            [
                "--offline",
                "c",
                "add",
                "type",
                "wifi",
                "ssid",
                "lala",
                "802-1x.eap",
                "pwd",
                "802-1x.identity",
                "foo",
                "802-1x.password",
                "bar",
                "802-1x.password-flags",
                "agent-owned",
            ],
            extra_env=no_dbus_env,
            replace_stdout=replace_uuids,
        )
        self.call_nmcli(
            ["--complete-args", "--offline", "conn", "modify", "ipv6.ad"],
            extra_env=no_dbus_env,
        )
@skip_without_pexpect
2022-06-28 14:29:24 +02:00
@nm_test
def test_ask_mode(self):
nmc = self.call_nmcli_pexpect(["--ask", "c", "add"])
nmc.expect("Connection type:")
nmc.sendline("ethernet")
nmc.expect("Interface name:")
nmc.sendline("eth0")
nmc.expect("There are 3 optional settings for Wired Ethernet.")
nmc.expect("Do you want to provide them\? \(yes/no\) \[yes]")
nmc.sendline("no")
nmc.expect("There are 2 optional settings for IPv4 protocol.")
nmc.expect("Do you want to provide them\? \(yes/no\) \[yes]")
nmc.sendline("no")
nmc.expect("There are 2 optional settings for IPv6 protocol.")
nmc.expect("Do you want to provide them\? \(yes/no\) \[yes]")
nmc.sendline("no")
nmc.expect("There are 4 optional settings for Proxy.")
nmc.expect("Do you want to provide them\? \(yes/no\) \[yes]")
nmc.sendline("no")
nmc.expect("Connection 'ethernet' \(.*\) successfully added.")
nmc.expect(pexpect.EOF)
###############################################################################
def main():
    """Entry point: ensure the suite runs inside a private D-Bus session.

    If not already re-executed (marker argument absent), re-exec ourselves
    under dbus-run-session, falling back to dbus-launch via a bash wrapper.
    Only then run the unittest suite. The exit code reflects test success.
    """
    global dbus_session_inited

    # the marker argument is added by the re-exec below; consume it.
    if len(sys.argv) >= 2 and sys.argv[1] == "--started-with-dbus-session":
        dbus_session_inited = True
        del sys.argv[1]

    if not dbus_session_inited:
        # we don't have yet our own dbus-session. Reexec ourself with
        # a new dbus-session.
        try:
            try:
                # on success this never returns (process image is replaced).
                os.execlp(
                    "dbus-run-session",
                    "dbus-run-session",
                    "--",
                    sys.executable,
                    __file__,
                    "--started-with-dbus-session",
                    *sys.argv[1:]
                )
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                # we have no dbus-run-session in path? Fall-through
                # to skip tests gracefully
            else:
                raise Exception("unknown error during exec")
        except Exception as e:
            assert False, "Failure to re-exec dbus-run-session: %s" % (str(e))

    if not dbus_session_inited:
        # we still don't have a D-Bus session. Probably dbus-run-session is not available.
        # retry with dbus-launch
        if os.system("type dbus-launch 1>/dev/null") == 0:
            try:
                # bash wrapper starts dbus-launch, kills it on exit, and
                # re-execs this script with the marker argument appended.
                os.execlp(
                    "bash",
                    "bash",
                    "-e",
                    "-c",
                    "eval `dbus-launch --sh-syntax`;\n"
                    + 'trap "kill $DBUS_SESSION_BUS_PID" EXIT;\n'
                    + "\n"
                    + " ".join(
                        [
                            Util.quote(a)
                            for a in [
                                sys.executable,
                                __file__,
                                "--started-with-dbus-session",
                            ]
                            + sys.argv[1:]
                        ]
                    )
                    + " \n"
                    + "",
                )
            except Exception as e:
                m = str(e)
            else:
                m = "unknown error"
            assert False, "Failure to re-exec to start script with dbus-launch: %s" % (
                m
            )

    r = unittest.main(exit=False)

    # exit 0 only when all tests passed (setUp skips count as success).
    sys.exit(not r.result.wasSuccessful())
# Script entry point; main() re-execs under a private D-Bus session first.
if __name__ == "__main__":
    main()