diff --git a/.github/labeler.yml b/.github/labeler.yml
index eaf08134c34..b17d7a4a131 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -2,6 +2,6 @@ documentation:
 - all:
   - changed-files:
     - any-glob-to-any-file:
-      - 'doc/*'
-      - 'cloudinit/config/schemas/*'
+      - 'doc/**'
+      - 'cloudinit/config/schemas/**'
   - base-branch: ['main']
diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml
index ba44e665811..c2f5808a53e 100644
--- a/.github/workflows/check_format.yml
+++ b/.github/workflows/check_format.yml
@@ -19,7 +19,7 @@ jobs:
       matrix:
         env: [ruff, mypy, pylint, black, isort]
     name: Check ${{ matrix.env }}
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-24.04
     steps:
       - name: "Checkout #1"
         uses: actions/checkout@v3.0.0
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index fd438b2106e..b15578864b6 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -1,4 +1,4 @@
-name: Verify Contributor License Agreement
+name: CLA Check
 
 on: [pull_request]
 
diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml
index 71171438900..a47c5a338e2 100644
--- a/.github/workflows/labeler.yaml
+++ b/.github/workflows/labeler.yaml
@@ -1,4 +1,4 @@
-name: "Pull Request Labeler"
+name: PR Labeler
 
 on:
 - pull_request_target
diff --git a/.github/workflows/packaging-tests.yml b/.github/workflows/packaging-tests.yml
new file mode 100644
index 00000000000..bec3752c48b
--- /dev/null
+++ b/.github/workflows/packaging-tests.yml
@@ -0,0 +1,41 @@
+name: Integration Tests
+
+on:
+  pull_request:
+    branches:
+      - 'ubuntu/**'
+
+concurrency:
+  group: 'ci-${{ github.workflow }}-${{ github.ref }}'
+  cancel-in-progress: true
+
+defaults:
+  run:
+    shell: sh -ex {0}
+
+env:
+  RELEASE: focal
+
+jobs:
+  daily-ppa-recipe-check:
+    runs-on: ubuntu-22.04
+    steps:
+      - name: "Checkout"
+        uses: actions/checkout@v3
+        with:
+          # Fetch all branches for merging
+          fetch-depth: 0
+      - name: "Prepare dependencies"
+        run: |
+          sudo DEBIAN_FRONTEND=noninteractive apt-get update
+          sudo DEBIAN_FRONTEND=noninteractive apt-get -y install tox quilt
+      - name: "Setup quilt environment"
+        run: |
+          echo 'QUILT_PATCHES=debian/patches' >> ~/.quiltrc
+          echo 'QUILT_SERIES=debian/patches/series' >> ~/.quiltrc
+
+      - name: 'Daily recipe: quilt patches apply successfully and tests run'
+        run: |
+          quilt push -a
+          tox -e py3
+          quilt pop -a
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml
index 5eda03c8e63..d704ae64d7b 100644
--- a/.github/workflows/unit.yml
+++ b/.github/workflows/unit.yml
@@ -14,19 +14,21 @@ jobs:
   unittests:
     strategy:
       matrix:
-        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
         toxenv: [py3]
+        slug: [""]
         experimental: [false]
         check-latest: [false]
         continue-on-error: [false]
         include:
-          - python-version: "3.6"
+          - python-version: "3.8"
             toxenv: lowest-supported
+            slug: (lowest-supported)
            continue-on-error: false
             check-latest: false
             experimental: false
-    name: unittest / ${{ matrix.toxenv }} / python ${{matrix.python-version}}
-    runs-on: ubuntu-20.04
+    name: Python ${{matrix.python-version}} ${{ matrix.slug }}
+    runs-on: ubuntu-22.04
     continue-on-error: ${{ matrix.experimental }}
     steps:
       - name: "Checkout"
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 7ddb8fc74e3..a5dffc010c3 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -39,7 +39,7 @@ def get_parser(parser=None):
             make_mime.handle_args,
         ),
     ]
-    for (subcmd, helpmsg, get_parser, handler) in subcmds:
+    for subcmd, helpmsg, get_parser, handler in subcmds:
         parser = subparsers.add_parser(subcmd, help=helpmsg)
         get_parser(parser)
         parser.set_defaults(action=(subcmd, handler))
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 2de9826bb83..72f13fb2257 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -949,14 +949,17 @@ def main(sysv_args=None):
         "--debug",
         "-d",
         action="store_true",
-        help="Show additional pre-action logging (default: %(default)s).",
+        help=(
+            "DEPRECATED: Show additional pre-action "
+            "logging (default: %(default)s)."
+        ),
         default=False,
     )
     parser.add_argument(
         "--force",
         action="store_true",
         help=(
-            "Force running even if no datasource is"
+            "DEPRECATED: Force running even if no datasource is"
             " found (use at your own risk)."
         ),
         dest="force",
@@ -979,7 +982,10 @@ def main(sysv_args=None):
 
     # Each action and its sub-options (if any)
     parser_init = subparsers.add_parser(
-        "init", help="Initialize cloud-init and perform initial modules."
+        "init",
+        help=(
+            "DEPRECATED: Initialize cloud-init and perform initial modules."
+        ),
     )
     parser_init.add_argument(
         "--local",
@@ -1002,7 +1008,8 @@ def main(sysv_args=None):
 
     # These settings are used for the 'config' and 'final' stages
     parser_mod = subparsers.add_parser(
-        "modules", help="Activate modules using a given configuration key."
+        "modules",
+        help=("DEPRECATED: Activate modules using a given configuration key."),
     )
     extra_help = lifecycle.deprecate(
         deprecated="`init`",
@@ -1033,7 +1040,11 @@ def main(sysv_args=None):
 
     # This subcommand allows you to run a single module
     parser_single = subparsers.add_parser(
-        "single", help="Run a single module."
+        "single",
+        help=(
+            "Manually run a single module. Useful for "
+            "testing during development."
+        ),
     )
     parser_single.add_argument(
         "--name",
diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py
index 3b9e931a58d..b14781adf97 100644
--- a/cloudinit/config/cc_ansible.py
+++ b/cloudinit/config/cc_ansible.py
@@ -1,4 +1,5 @@
 """ansible enables running on first boot either ansible-pull"""
+
 import abc
 import logging
 import os
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 459f0a3cded..8f2978b0579 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -65,12 +65,10 @@ def __init__(self, distro: Distro):
         self._distro = distro
 
     @abstractmethod
-    def available(self, devices: list) -> bool:
-        ...
+    def available(self, devices: list) -> bool: ...
 
     @abstractmethod
-    def resize(self, diskdev, partnum, partdev, fs):
-        ...
+    def resize(self, diskdev, partnum, partdev, fs): ...
 
 
 class ResizeGrowPart(Resizer):
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 50cecc03bd2..b9dc22a4cfb 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -95,7 +95,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
         "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub",
     }
 
-    for (n, path) in pubkeys.items():
+    for n, path in pubkeys.items():
         try:
             all_keys[n] = util.load_text_file(path)
         except Exception:
@@ -117,7 +117,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
 
     # Get them read to be posted
     real_submit_keys = {}
-    for (k, v) in submit_keys.items():
+    for k, v in submit_keys.items():
         if v is None:
             real_submit_keys[k] = "N/A"
         else:
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index b90db58ff88..70bd4e17f3b 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -58,7 +58,10 @@ def _resize_btrfs(mount_point, devpth):
     # the resize operation can be queued
     btrfs_with_queue = lifecycle.Version.from_str("5.10")
     system_btrfs_ver = lifecycle.Version.from_str(
-        subp.subp(["btrfs", "--version"])[0].split("v")[-1].strip()
+        subp.subp(["btrfs", "--version"])
+        .stdout.split("\n")[0]
+        .split("v")[-1]
+        .strip()
     )
     if system_btrfs_ver >= btrfs_with_queue:
         idx = cmd.index("resize")
@@ -290,7 +293,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
         return
 
     fstype_lc = fs_type.lower()
-    for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+    for pfix, root_cmd in RESIZE_FS_PREFIXES_CMDS:
         if fstype_lc.startswith(pfix):
             resizer = root_cmd
             break
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 947469b5b6d..4c4f0c33e18 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -111,7 +111,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
     if "ssh_keys" in cfg:
         # if there are keys and/or certificates in cloud-config, use them
         cert_config = []
-        for (key, val) in cfg["ssh_keys"].items():
+        for key, val in cfg["ssh_keys"].items():
             if key not in CONFIG_KEY_TO_FILE:
                 if pattern_unsupported_config_keys.match(key):
                     reason = "unsupported"
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 106b3cbd0c3..8b02789b739 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -112,7 +112,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
     hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
     (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
 
-    for (user_name, _cfg) in users.items():
+    for user_name, _cfg in users.items():
         if _cfg.get("no_create_home") or _cfg.get("system"):
             LOG.debug(
                 "Skipping printing of ssh fingerprints for user '%s' because "
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 8abf3914fc6..7c1422dee3d 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -59,7 +59,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
     # import for cloudinit created users
     (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
     elist = []
-    for (user, user_cfg) in users.items():
+    for user, user_cfg in users.items():
         import_ids = []
         if user_cfg["default"]:
             import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index ace17733c3a..0f3b2121cf4 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -37,10 +37,10 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
     (default_user, _user_config) = ug_util.extract_default(users)
     cloud_keys = cloud.get_public_ssh_keys() or []
 
-    for (name, members) in groups.items():
+    for name, members in groups.items():
         cloud.distro.create_group(name, members)
 
-    for (user, config) in users.items():
+    for user, config in users.items():
         no_home = [key for key in NO_HOME if config.get(key)]
         need_home = [key for key in NEED_HOME if config.get(key)]
 
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 004ede438d9..c05f227a40b 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -78,7 +78,7 @@ def write_files(name, files, owner: str, ssl_details: Optional[dict] = None):
     if not files:
         return
 
-    for (i, f_info) in enumerate(files):
+    for i, f_info in enumerate(files):
         path = f_info.get("path")
         if not path:
             LOG.warning(
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 548c83bab6d..d857d89d30d 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -89,7 +89,7 @@ def _format_repository_config(repo_id, repo_config):
     to_be = ConfigParser()
     to_be.add_section(repo_id)
     # Do basic translation of the items -> values
-    for (k, v) in repo_config.items():
+    for k, v in repo_config.items():
         # For now assume that people using this know
         # the format of yum and don't verify keys/values further
         to_be.set(repo_id, k, _format_repo_value(v))
@@ -114,7 +114,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
     )
     repo_locations = {}
     repo_configs = {}
-    for (repo_id, repo_config) in repos.items():
+    for repo_id, repo_config in repos.items():
         canon_repo_id = _canonicalize_id(repo_id)
         repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
         if os.path.exists(repo_fn_pth):
@@ -135,7 +135,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
             repo_config = {}
         # Do some basic sanity checks/cleaning
         n_repo_config = {}
-        for (k, v) in repo_config.items():
+        for k, v in repo_config.items():
             k = k.lower().strip().replace("-", "_")
             if k:
                 n_repo_config[k] = v
@@ -157,7 +157,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
             repo_configs[canon_repo_id] = repo_config
             repo_locations[canon_repo_id] = repo_fn_pth
 
-    for (c_repo_id, path) in repo_locations.items():
+    for c_repo_id, path in repo_locations.items():
         repo_blob = _format_repository_config(
             c_repo_id, repo_configs.get(c_repo_id)
         )
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index 02b372dcc70..fa55e09b07c 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -54,7 +54,7 @@ def _format_repository_config(repo_id, repo_config):
     to_be = configobj.ConfigObj()
     to_be[repo_id] = {}
     # Do basic translation of the items -> values
-    for (k, v) in repo_config.items():
+    for k, v in repo_config.items():
         # For now assume that people using this know the format
         # of zypper repos and don't verify keys/values further
         to_be[repo_id][k] = _format_repo_value(v)
@@ -115,7 +115,7 @@ def _write_repos(repos, repo_base_path):
 
         valid_repos[repo_id] = (repo_fn_pth, repo_config)
 
-    for (repo_id, repo_data) in valid_repos.items():
+    for repo_id, repo_data in valid_repos.items():
         repo_blob = _format_repository_config(repo_id, repo_data[-1])
         util.write_file(repo_data[0], repo_blob)
 
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index a2fceecabcb..de9547b987c 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -46,9 +46,7 @@
 )
 
 try:
-    from jsonschema import ValidationError as _ValidationError
-
-    ValidationError = _ValidationError
+    from jsonschema import ValidationError
 except ImportError:
     ValidationError = Exception  # type: ignore
 
@@ -1559,9 +1557,9 @@ def get_meta_doc(meta: MetaSchema, schema: Optional[dict] = None) -> str:
         LOG.warning("Unable to render property_doc due to invalid schema")
         meta_copy["property_doc"] = ""
     if not meta_copy.get("property_doc", ""):
-        meta_copy[
-            "property_doc"
-        ] = " No schema definitions for this module"
+        meta_copy["property_doc"] = (
+            " No schema definitions for this module"
+        )
     meta_copy["examples"] = textwrap.indent(_get_examples(meta), " ")
     if not meta_copy["examples"]:
         meta_copy["examples"] = " No examples for this module"
diff --git a/cloudinit/distros/aosc.py b/cloudinit/distros/aosc.py
index 0460c740d5c..96fa48b8b6e 100644
--- a/cloudinit/distros/aosc.py
+++ b/cloudinit/distros/aosc.py
@@ -107,7 +107,7 @@ def package_command(self, command, args=None, pkgs=None):
     def install_packages(self, pkglist: PackageList):
         self.package_command("install", pkgs=pkglist)
 
-    def update_package_sources(self):
+    def update_package_sources(self, *, force=False):
         self._runner.run(
             "update-sources",
             self.package_command,
@@ -131,7 +131,7 @@ def update_locale_conf(sys_path, locale_cfg):
         return
     (exists, contents) = read_locale_conf(sys_path)
     updated_am = 0
-    for (k, v) in locale_cfg.items():
+    for k, v in locale_cfg.items():
         if v is None:
             continue
         v = str(v)
diff --git a/cloudinit/distros/package_management/package_manager.py b/cloudinit/distros/package_management/package_manager.py
index 32c4cac246c..9b45bb870b4 100644
--- a/cloudinit/distros/package_management/package_manager.py
+++ b/cloudinit/distros/package_management/package_manager.py
@@ -22,8 +22,7 @@ def available(self) -> bool:
         """Return if package manager is installed on system."""
 
     @abstractmethod
-    def update_package_sources(self, *, force=False):
-        ...
+    def update_package_sources(self, *, force=False): ...
 
     @abstractmethod
     def install_packages(self, pkglist: Iterable) -> UninstalledPackages:
diff --git a/cloudinit/distros/package_management/snap.py b/cloudinit/distros/package_management/snap.py
index baab9e3ca85..8732cbc43e0 100644
--- a/cloudinit/distros/package_management/snap.py
+++ b/cloudinit/distros/package_management/snap.py
@@ -35,4 +35,23 @@ def install_packages(self, pkglist: Iterable) -> UninstalledPackages:
 
     @staticmethod
     def upgrade_packages():
-        subp.subp(["snap", "refresh"])
+        command = ["snap", "get", "system", "-d"]
+        snap_hold = None
+        try:
+            result = subp.subp(command)
+            snap_hold = (
+                util.load_json(result.stdout).get("refresh", {}).get("hold")
+            )
+        except subp.ProcessExecutionError as e:
+            LOG.info(
+                "Continuing to snap refresh. Unable to run command: %s: %s",
+                command,
+                e,
+            )
+        if snap_hold == "forever":
+            LOG.info(
+                "Skipping snap refresh because refresh.hold is set to '%s'",
+                snap_hold,
+            )
+        else:
+            subp.subp(["snap", "refresh"])
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index 7e498a5fab1..7250b6a8eb2 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -22,7 +22,7 @@ def parse(self):
     def __str__(self):
         self.parse()
         contents = StringIO()
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type == "blank":
                 contents.write("%s\n" % (components[0]))
             elif line_type == "all_comment":
@@ -39,7 +39,7 @@ def __str__(self):
     @property
     def hostname(self):
         self.parse()
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type == "hostname":
                 return components[0]
         return None
@@ -50,7 +50,7 @@ def set_hostname(self, your_hostname):
             return
         self.parse()
         replaced = False
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type == "hostname":
                 components[0] = str(your_hostname)
                 replaced = True
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index d907e8b67f8..8d2f73ac91f 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -24,7 +24,7 @@ def parse(self):
     def get_entry(self, ip):
         self.parse()
         options = []
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type == "option":
                 (pieces, _tail) = components
                 if len(pieces) and pieces[0] == ip:
@@ -34,7 +34,7 @@ def get_entry(self, ip):
     def del_entries(self, ip):
         self.parse()
         n_entries = []
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type != "option":
                 n_entries.append((line_type, components))
                 continue
@@ -68,7 +68,7 @@ def _parse(self, contents):
     def __str__(self):
         self.parse()
         contents = StringIO()
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type == "blank":
                 contents.write("%s\n" % (components[0]))
             elif line_type == "all_comment":
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 000d0b577ab..6884c740989 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -57,7 +57,7 @@ def search_domains(self):
     def __str__(self):
         self.parse()
         contents = StringIO()
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type == "blank":
                 contents.write("\n")
             elif line_type == "all_comment":
@@ -72,7 +72,7 @@ def __str__(self):
 
     def _retr_option(self, opt_name):
         found = []
-        for (line_type, components) in self._contents:
+        for line_type, components in self._contents:
             if line_type == "option":
                 (cfg_opt, cfg_value, _comment_tail) = components
                 if cfg_opt == opt_name:
@@ -134,7 +134,7 @@ def add_search_domain(self, search_domain):
 
     def _parse(self, contents):
         entries = []
-        for (i, line) in enumerate(contents.splitlines()):
+        for i, line in enumerate(contents.splitlines()):
             sline = line.strip()
             if not sline:
                 entries.append(("blank", [line]))
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index 426335f9b96..6a1b28163fd 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -22,7 +22,7 @@ def update_sysconfig_file(fn, adjustments, allow_empty=False):
         return
     (exists, contents) = read_sysconfig_file(fn)
     updated_am = 0
-    for (k, v) in adjustments.items():
+    for k, v in adjustments.items():
         if v is None:
             continue
         v = str(v)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index d12944258b1..470a5b2013f 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -369,13 +369,13 @@ def _unpickle(self, ci_pkl_version: int) -> None:
         if "instance_data" not in self.lookups:
             self.lookups["instance_data"] = "instance-data.json"
         if "instance_data_sensitive" not in self.lookups:
-            self.lookups[
-                "instance_data_sensitive"
-            ] = "instance-data-sensitive.json"
+            self.lookups["instance_data_sensitive"] = (
+                "instance-data-sensitive.json"
+            )
         if "combined_cloud_config" not in self.lookups:
-            self.lookups[
-                "combined_cloud_config"
-            ] = "combined-cloud-config.json"
+            self.lookups["combined_cloud_config"] = (
+                "combined-cloud-config.json"
+            )
         if "hotplug.enabled" not in self.lookups:
             self.lookups["hotplug.enabled"] = "hotplug.enabled"
 
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 983b426b7ce..fd83c994c88 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -176,7 +176,7 @@ def setup_backup_logging():
     which may ease debugging.
     """
     fallback_handler = logging.StreamHandler(sys.stderr)
-    fallback_handler.handleError = lambda record: None
+    setattr(fallback_handler, "handleError", lambda record: None)
     fallback_handler.setFormatter(
         logging.Formatter(
             "FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: %(message)s"
@@ -189,7 +189,7 @@ def handleError(self, record):
             fallback_handler.handle(record)
             fallback_handler.flush()
 
-    logging.Handler.handleError = handleError
+    setattr(logging.Handler, "handleError", handleError)
 
 
 class CloudInitLogRecord(logging.LogRecord):
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index dcbd5c82e60..ce1d8ebce5f 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -127,7 +127,7 @@ def default_mergers():
 
 def construct(parsed_mergers):
     mergers_to_be = []
-    for (m_name, m_ops) in parsed_mergers:
+    for m_name, m_ops in parsed_mergers:
         if not m_name.startswith(MERGER_PREFIX):
             m_name = MERGER_PREFIX + str(m_name)
         merger_locs, looked_locs = importer.find_module(
@@ -147,6 +147,6 @@ def construct(parsed_mergers):
     # Now form them...
     mergers = []
     root = LookupMerger(mergers)
-    for (attr, opts) in mergers_to_be:
+    for attr, opts in mergers_to_be:
         mergers.append(attr(root, opts))
     return root
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index 9c51bc982b2..20d2716ffa4 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -61,7 +61,7 @@ def merge_same_key(old_v, new_v):
             # Otherwise leave it be...
             return old_v
 
-        for (k, v) in merge_with.items():
+        for k, v in merge_with.items():
             if k in value:
                 if v is None and self._allow_delete:
                     value.pop(k)
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index e38b6779dd4..28ac814b6e9 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -555,10 +555,8 @@ def find_fallback_nic_on_linux() -> Optional[str]:
     return None
 
 
-def generate_fallback_config(config_driver=None):
+def generate_fallback_config(config_driver=None) -> Optional[dict]:
     """Generate network cfg v2 for dhcp on the NIC most likely connected."""
-    if not config_driver:
-        config_driver = False
 
     target_name = find_fallback_nic()
     if not target_name:
@@ -572,16 +570,16 @@ def generate_fallback_config(config_driver=None):
     match = {
         "macaddress": read_sys_net_safe(target_name, "address").lower()
     }
+    if config_driver:
+        driver = device_driver(target_name)
+        if driver:
+            match["driver"] = driver
     cfg = {
         "dhcp4": True,
         "dhcp6": True,
         "set-name": target_name,
         "match": match,
     }
-    if config_driver:
-        driver = device_driver(target_name)
-        if driver:
-            cfg["match"]["driver"] = driver
 
     nconf = {"ethernets": {target_name: cfg}, "version": 2}
     return nconf
@@ -670,7 +668,7 @@ def _get_current_rename_info(check_downable=True):
     }}
     """
     cur_info = {}
-    for (name, mac, driver, device_id) in get_interfaces():
+    for name, mac, driver, device_id in get_interfaces():
         cur_info[name] = {
             "downable": None,
             "device_id": device_id,
diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py
index c8730fb1e8a..39dd8ba3c8e 100644
--- a/cloudinit/net/ephemeral.py
+++ b/cloudinit/net/ephemeral.py
@@ -356,10 +356,10 @@ def obtain_lease(self):
                 kwargs["prefix_or_mask"], kwargs["ip"]
             )
         if kwargs["static_routes"]:
-            kwargs[
-                "static_routes"
-            ] = self.distro.dhcp_client.parse_static_routes(
-                kwargs["static_routes"]
+            kwargs["static_routes"] = (
+                self.distro.dhcp_client.parse_static_routes(
+                    kwargs["static_routes"]
+                )
             )
         ephipv4 = EphemeralIPv4Network(
             self.distro,
diff --git a/cloudinit/net/netops/__init__.py b/cloudinit/net/netops/__init__.py
index 7b95917874b..8c5e2bd13c6 100644
--- a/cloudinit/net/netops/__init__.py
+++ b/cloudinit/net/netops/__init__.py
@@ -1,3 +1,4 @@
+from abc import abstractmethod
 from typing import Optional
 
 from cloudinit.subp import SubpResult
@@ -5,14 +6,17 @@
 
 class NetOps:
     @staticmethod
+    @abstractmethod
     def link_up(interface: str) -> SubpResult:
         pass
 
     @staticmethod
+    @abstractmethod
     def link_down(interface: str) -> SubpResult:
         pass
 
     @staticmethod
+    @abstractmethod
     def link_rename(current_name: str, new_name: str):
         pass
 
@@ -41,6 +45,7 @@ def del_route(
         pass
 
     @staticmethod
+    @abstractmethod
     def get_default_route() -> str:
         pass
 
diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py
index 06305668fe4..30a56ebffdd 100644
--- a/cloudinit/net/network_manager.py
+++ b/cloudinit/net/network_manager.py
@@ -145,11 +145,13 @@ def _set_ip_method(self, family, subnet_type):
             "dhcp": "auto",
         }
 
-        # Ensure we got an [ipvX] section
-        self._set_default(family, "method", "disabled")
+        # Ensure we have an [ipvX] section, default to disabled
+        method = "disabled"
+        self._set_default(family, "method", method)
 
         try:
-            method = method_map[subnet_type]
+            if subnet_type:
+                method = method_map[subnet_type]
         except KeyError:
             # What else can we do
             method = "auto"
@@ -171,6 +173,20 @@ def _set_ip_method(self, family, subnet_type):
             self._set_default("ipv4", "method", "disabled")
 
         self.config[family]["method"] = method
+
+        # Network Manager sets the value of `may-fail` to `True` by default.
+        # Please see https://www.networkmanager.dev/docs/api/1.32.10/settings-ipv6.html.
+        # Therefore, when no configuration for ipv4 or ipv6 is specified,
+        # `may-fail = True` applies. When the user explicitly configures ipv4
+        # or ipv6, `may-fail` is set to `False`. This is so because it is
+        # assumed that a network failure with the user provided configuration
+        # is unexpected. In other words, we think that the user knows what
+        # works in their target environment and what does not and they have
+        # correctly configured cloud-init network configuration such that
+        # it works in that environment. When no such configuration is
+        # specified, we do not know what would work and what would not in
+        # user's environment. Therefore, we are more conservative in assuming
+        # that failure with ipv4 or ipv6 can be expected or tolerated.
         self._set_default(family, "may-fail", "false")
 
     def _get_next_numbered_section(self, section, key_prefix) -> str:
@@ -317,16 +333,18 @@ def render_interface(self, iface, network_state, renderer):
 
         # These are the interface properties that map nicely
         # to NetworkManager properties
+        # NOTE: Please ensure these items are formatted so as
+        # to match the schema in schema-network-config-v1.json
         _prop_map = {
             "bond": {
                 "mode": "bond-mode",
-                "miimon": "bond_miimon",
-                "xmit_hash_policy": "bond-xmit-hash-policy",
-                "num_grat_arp": "bond-num-grat-arp",
+                "miimon": "bond-miimon",
+                "xmit_hash_policy": "bond-xmit_hash_policy",
+                "num_grat_arp": "bond-num_grat_arp",
                 "downdelay": "bond-downdelay",
                 "updelay": "bond-updelay",
-                "fail_over_mac": "bond-fail-over-mac",
-                "primary_reselect": "bond-primary-reselect",
+                "fail_over_mac": "bond-fail_over_mac",
+                "primary_reselect": "bond-primary_reselect",
                 "primary": "bond-primary",
             },
             "bridge": {
@@ -346,6 +364,17 @@ def render_interface(self, iface, network_state, renderer):
         found_dns_search = []
 
         # Deal with Layer 3 configuration
+        if if_type == "bond" and not iface["subnets"]:
+            # If there is no L3 subnet config for a given connection,
+            # ensure it is disabled. Without this, the interface
+            # defaults to 'auto' which implies DHCP. This is problematic
+            # for certain configurations such as bonds where the root
+            # device itself may not have a subnet config and should be
+            # disabled while a separate VLAN interface on the bond holds
+            # the subnet information.
+            for family in ["ipv4", "ipv6"]:
+                self._set_ip_method(family, None)
+
         for subnet in iface["subnets"]:
             family = "ipv6" if subnet_is_ipv6(subnet) else "ipv4"
 
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index 83b33e0380c..6dea579b141 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -10,7 +10,7 @@
 
 
 class Renderer(cloudinit.net.bsd.BSDRenderer):
-    def write_config(self):
+    def write_config(self, target=None):
         for device_name, v in self.interface_configurations.items():
             if_file = "etc/hostname.{}".format(device_name)
             fn = subp.target_path(self.target, if_file)
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 015c4494928..0684116c0b9 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -675,7 +675,7 @@ def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor):
     @classmethod
     def _render_bonding_opts(cls, iface_cfg, iface, flavor):
         bond_opts = []
-        for (bond_key, value_tpl) in cls.bond_tpl_opts:
+        for bond_key, value_tpl in cls.bond_tpl_opts:
             # Seems like either dash or underscore is possible?
             bond_keys = [bond_key, bond_key.replace("_", "-")]
             for bond_key in bond_keys:
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 6aee531638d..be455b889c7 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -579,7 +579,7 @@ def netdev_pformat():
         return "\n"
     fields = ["Device", "Up", "Address", "Mask", "Scope", "Hw-Address"]
     tbl = SimpleTable(fields)
-    for (dev, data) in sorted(netdev.items()):
+    for dev, data in sorted(netdev.items()):
         for addr in data.get("ipv4"):
             tbl.add_row(
                 (
@@ -635,7 +635,7 @@ def route_pformat():
         "Flags",
     ]
     tbl_v4 = SimpleTable(fields_v4)
-    for (n, r) in enumerate(routes.get("ipv4")):
+    for n, r in enumerate(routes.get("ipv4")):
         route_id = str(n)
         try:
             tbl_v4.add_row(
@@ -663,7 +663,7 @@ def route_pformat():
         "Flags",
     ]
     tbl_v6 = SimpleTable(fields_v6)
-    for (n, r) in enumerate(routes.get("ipv6")):
+    for n, r in enumerate(routes.get("ipv6")):
         route_id = str(n)
         if r["iface"] == "lo":
             continue
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index 2d4612e9647..a4328068f66 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -137,9 +137,9 @@ class _CustomSafeLoaderWithMarks(yaml.SafeLoader):
 
     def __init__(self, stream):
         super().__init__(stream)
-        self.schemamarks_by_line: Dict[
-            int, List[SchemaPathMarks]
-        ] = defaultdict(list)
+        self.schemamarks_by_line: Dict[int, List[SchemaPathMarks]] = (
+            defaultdict(list)
+        )
 
     def _get_nested_path_prefix(self, node):
         if node.start_mark.line in self.schemamarks_by_line:
@@ -162,8 +162,8 @@ def _get_nested_path_prefix(self, node):
                 return f"{mark.path}."
         return ""
 
-    def construct_mapping(self, node):
-        mapping = super().construct_mapping(node)
+    def construct_mapping(self, node, deep=False):
+        mapping = super().construct_mapping(node, deep=deep)
         nested_path_prefix = self._get_nested_path_prefix(node)
         for key_node, value_node in node.value:
             node_key_path = f"{nested_path_prefix}{key_node.value}"
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 727477df462..d674e1fc081 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -94,6 +94,7 @@ class DataSourceAliYunLocal(DataSourceAliYun):
     (DataSourceAliYun, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
 ]
 
 
+# Return a list of data sources that match this set of dependencies
 def get_datasource_list(depends):
     return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 5ca6c27d176..6ddfff429dd 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -208,7 +208,7 @@ def read_config_drive(source_dir):
         (reader.read_v1, [], {}),
     ]
     excps = []
-    for (functor, args, kwargs) in finders:
+    for functor, args, kwargs in finders:
         try:
             return functor(*args, **kwargs)
         except openstack.NonReadable as e:
@@ -244,7 +244,7 @@ def on_first_boot(data, distro=None, network=True):
 def write_injected_files(files):
     if files:
         LOG.debug("Writing %s injected files", len(files))
-        for (filename, content) in files.items():
+        for filename, content in files.items():
             if not filename.startswith(os.sep):
                 filename = os.sep + filename
             try:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index e1ab1c5fe03..526520be2c0 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -943,9 +943,11 @@ def _get_key_as_int_or(dikt, key, alt_value):
                         _get_key_as_int_or(
                             mmd[1], "device-number", float("infinity")
                         ),
-                        mmd[2]
-                        if fallback_nic_order == NicOrder.NIC_NAME
-                        else mmd[0],
+                        (
+                            mmd[2]
+                            if fallback_nic_order == NicOrder.NIC_NAME
+                            else mmd[0]
+                        ),
                     ),
                 )
             )
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 1e2a422cd03..f3bcd5315a2 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -292,7 +292,7 @@ def read_md(address=None, url_params=None, platform_check=True):
     )
     md = {}
     # Iterate over url_map keys to get metadata items.
-    for (mkey, paths, required, is_text, is_recursive) in url_map:
+    for mkey, paths, required, is_text, is_recursive in url_map:
         value = None
         for path in paths:
             new_value = metadata_fetcher.get_value(path, is_text, is_recursive)
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 7ef5a5c31fd..6529e2ff1c6 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -76,7 +76,7 @@ def _get_data(self):
                 sec_between=self.wait_retry,
                 retries=self.retries,
             )
-        except (NoDHCPLeaseError) as e:
+        except NoDHCPLeaseError as e:
             LOG.error("Bailing, DHCP Exception: %s", e)
             raise
 
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 43be28e0a15..cb3f5ece009 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -176,14 +176,15 @@ class DataSourceLXD(sources.DataSource):
     _network_config: Union[Dict, str] = sources.UNSET
     _crawled_metadata: Optional[Union[Dict, str]] = sources.UNSET
 
-    sensitive_metadata_keys: Tuple[
-        str, ...
-    ] = sources.DataSource.sensitive_metadata_keys + (
-        "user.meta-data",
-        "user.vendor-data",
-        "user.user-data",
-        "cloud-init.user-data",
-        "cloud-init.vendor-data",
+    sensitive_metadata_keys: Tuple[str, ...] = (
+        sources.DataSource.sensitive_metadata_keys
+        + (
+            "user.meta-data",
+            "user.vendor-data",
+            "user.user-data",
+            "cloud-init.user-data",
+            "cloud-init.vendor-data",
+        )
     )
 
     skip_hotplug_detect = True
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 136cea82495..933d95c924b 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -372,7 +372,7 @@ def main():
     )
 
     subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
-    for (name, help) in (
+    for name, help in (
         ("crawl", "crawl the datasource"),
         ("get", "do a single GET of provided url"),
         ("check-seed", "read and verify seed at url"),
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 05b50b98ee2..1ee7bf25287 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -337,7 +337,7 @@ def _get_data(self):
                 func=self._crawl_metadata,
             )
             self.metadata["net_in_use"] = "ipv6"
-        except (ConnectionError):
+        except ConnectionError:
             return False
         return True
 
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index 2d7f1f31a1e..d62fbe72ea5 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -30,9 +30,9 @@ class DataSourceVultr(sources.DataSource):
 
     dsname = "Vultr"
 
-    sensitive_metadata_keys: Tuple[
-        str, ...
-    ] = sources.DataSource.sensitive_metadata_keys + ("startup-script",)
+    sensitive_metadata_keys: Tuple[str, ...] = (
+        sources.DataSource.sensitive_metadata_keys + ("startup-script",)
+    )
 
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index a3958d9b918..d8182086fa0 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -764,7 +764,7 @@ def _remap_device(self, short_name):
         # we want to return the correct value for what will actually
         # exist in this instance
         mappings = {"sd": ("vd", "xvd", "vtb")}
-        for (nfrom, tlist) in mappings.items():
+        for nfrom, tlist in mappings.items():
             if not short_name.startswith(nfrom):
                 continue
             for nto in tlist:
@@ -1014,7 +1014,7 @@ def normalize_pubkey_data(pubkey_data):
         return list(pubkey_data)
 
     if isinstance(pubkey_data, (dict)):
-        for (_keyname, klist) in pubkey_data.items():
+        for _keyname, klist in pubkey_data.items():
             # lp:506332 uec metadata service responds with
             # data that makes boto populate a string for 'klist' rather
             # than a list.
@@ -1170,7 +1170,7 @@ class BrokenMetadata(IOError):
 def list_from_depends(depends, ds_list):
     ret_list = []
     depset = set(depends)
-    for (cls, deps) in ds_list:
+    for cls, deps in ds_list:
         if depset == set(deps):
             ret_list.append(cls)
     return ret_list
diff --git a/cloudinit/sources/helpers/ec2.py b/cloudinit/sources/helpers/ec2.py
index ffb41dbfd7a..a3590a6e4b2 100644
--- a/cloudinit/sources/helpers/ec2.py
+++ b/cloudinit/sources/helpers/ec2.py
@@ -121,7 +121,7 @@ def _materialize(self, blob, base_url):
             child_blob = self._caller(child_url)
             child_contents[c] = self._materialize(child_blob, child_url)
         leaf_contents = {}
-        for (field, resource) in leaves.items():
+        for field, resource in leaves.items():
             leaf_url = url_helper.combine_url(base_url, resource)
             leaf_blob = self._caller(leaf_url)
             leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 9b46a22c37d..97ec18faf98 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -87,7 +87,7 @@ def _ec2_name_to_device(self, name):
         if not self.ec2_metadata:
             return None
         bdm = self.ec2_metadata.get("block-device-mapping", {})
-        for (ent_name, device) in bdm.items():
+        for ent_name, device in bdm.items():
             if name == ent_name:
                 return device
         return None
@@ -266,7 +266,7 @@ def datafiles(version):
             "version": 2,
         }
         data = datafiles(self._find_working_version())
-        for (name, (path, required, translator)) in data.items():
+        for name, (path, required, translator) in data.items():
             path = self._path_join(self.base_path, path)
             data = None
             found = False
@@ -346,7 +346,7 @@ def datafiles(version):
             results["ec2-metadata"] = self._read_ec2_metadata()
 
         # Perform some misc. metadata key renames...
-        for (target_key, source_key, is_required) in KEY_COPIES:
+        for target_key, source_key, is_required in KEY_COPIES:
             if is_required and source_key not in metadata:
                 raise BrokenMetadata("No '%s' entry in metadata" % source_key)
             if source_key in metadata:
@@ -412,7 +412,7 @@ def read_v1(self):
             raise NonReadable("%s: no files found" % (self.base_path))
 
         md = {}
-        for (name, (key, translator, default)) in FILES_V1.items():
+        for name, (key, translator, default) in FILES_V1.items():
             if name in found:
                 path = found[name]
                 try:
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 9f86838964f..ec17cbc736c 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -66,7 +66,7 @@ def _loadConfigFile(self, filename):
         for category in config.sections():
             logger.debug("FOUND CATEGORY = '%s'", category)
 
-            for (key, value) in config.items(category):
+            for key, value in config.items(category):
                 self._insertKey(category + "|" + key, value)
 
     def get_count_with_prefix(self, prefix):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 254518af9e3..b07214a228b 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -207,7 +207,7 @@ def gen_ipv6(self, name, nic):
         """
 
         if not nic.staticIpv6:
-            return ([{"type": "dhcp6"}], [])
+            return ([], [])
 
         subnet_list = []
         # Static Ipv6
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 70002086738..f7661929e49 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -593,7 +593,7 @@ def update_ssh_config_lines(lines, updates):
     # Keywords are case-insensitive and arguments are case-sensitive
     casemap = dict([(k.lower(), k) for k in updates.keys()])
 
-    for (i, line) in enumerate(lines, start=1):
+    for i, line in enumerate(lines, start=1):
         if not line.key:
             continue
         if line.key in casemap:
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 1d911aaf3ac..854e318e992 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -81,9 +81,9 @@ def update_event_enabled(
     case, we only have the data source's `default_update_events`, so an
     event that should be enabled in userdata may be denied.
""" - default_events: Dict[ - EventScope, Set[EventType] - ] = datasource.default_update_events + default_events: Dict[EventScope, Set[EventType]] = ( + datasource.default_update_events + ) user_events: Dict[EventScope, Set[EventType]] = userdata_to_events( cfg.get("updates", {}) ) @@ -985,9 +985,9 @@ def _find_networking_config( } if self.datasource and hasattr(self.datasource, "network_config"): - available_cfgs[ - NetworkConfigSource.DS - ] = self.datasource.network_config + available_cfgs[NetworkConfigSource.DS] = ( + self.datasource.network_config + ) if self.datasource: order = self.datasource.network_config_sources diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 9cb3d4a0088..f404d1130bd 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -293,8 +293,7 @@ def __init__(self, response: requests.Response): @property def contents(self) -> bytes: if self._response.content is None: - # typeshed bug: https://github.com/python/typeshed/pull/12180 - return b"" # type: ignore + return b"" return self._response.content @property @@ -463,7 +462,7 @@ def readurl( req_args["headers"] = headers filtered_req_args = {} - for (k, v) in req_args.items(): + for k, v in req_args.items(): if k == "data": continue if k == "headers" and headers_redact: diff --git a/cloudinit/util.py b/cloudinit/util.py index 34d3623a7f7..31ba1c83574 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -973,7 +973,7 @@ def read_optional_seed(fill, base="", ext="", timeout=5): fill["user-data"] = ud fill["vendor-data"] = vd fill["meta-data"] = md - fill["network-config"] = md + fill["network-config"] = network return True except url_helper.UrlError as e: if e.code == url_helper.NOT_FOUND: diff --git a/conftest.py b/conftest.py index 11aeae8facd..77111a81cab 100644 --- a/conftest.py +++ b/conftest.py @@ -7,6 +7,7 @@ any of these tests run: that is to say, they must be listed in ``integration-requirements.txt`` and in ``test-requirements.txt``. """ + # If we don't import this early, lru_cache may get applied before we have the # chance to patch. This is also too early for the pytest-antilru plugin # to work. diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1 index 8776099c02a..d69c5abae32 100644 --- a/doc/man/cloud-init.1 +++ b/doc/man/cloud-init.1 @@ -4,7 +4,7 @@ cloud-init \- Cloud instance initialization .SH SYNOPSIS -.BR "cloud-init" " [-h] [-d] [-f FILES] [--force] [-v] [SUBCOMMAND]" +.BR "cloud-init" " [-h] [-d] [--force] [-v] [SUBCOMMAND]" .SH DESCRIPTION Cloud-init provides a mechanism for cloud instance initialization. @@ -12,27 +12,19 @@ This is done by identifying the cloud platform that is in use, reading provided cloud metadata and optional vendor and user data, and then initializing the instance as requested. -Generally, this command is not normally meant to be run directly by -the user. However, some subcommands may useful for development or -debug of deployments. - .SH OPTIONS .TP .B "-h, --help" Show help message and exit. -.TP -.B "-d, --debug" -Show additional pre-action logging (default: False). - -.TP -.B "--force" -Force running even if no datasource is found (use at your own risk). - .TP .B "-v, --version" Show program's version number and exit. +.TP +.B "--all-stages" +INTERNAL: Run cloud-init's stages under a single process using a syncronization protocol. This is not intended for CLI usage. + .SH SUBCOMMANDS Please see the help output for each subcommand for additional details, flags, and subcommands. @@ -57,14 +49,6 @@ Run development tools. 
See help output for subcommand details. .B "features" List defined features. -.TP -.B "init" -Initialize cloud-init and execute initial modules. - -.TP -.B "modules" -Activate modules using a given configuration key. - .TP .B "query" Query standardized instance metadata from the command line. @@ -75,12 +59,30 @@ Validate cloud-config files using jsonschema. .TP .B "single" -Run a single module. +Manually run a single module. Useful for testing during development. .TP .B "status" Report cloud-init status or wait on completion. +.SH DEPRECATED + +.TP +.B "-d, --debug" +Show additional pre-action logging (default: False). + +.TP +.B "--force" +Force running even if no datasource is found (use at your own risk). + +.TP +.B "init" +Initialize cloud-init and execute initial modules. + +.TP +.B "modules" +Activate modules using a given configuration key. + .SH EXIT STATUS .IP @@ -95,4 +97,4 @@ Report cloud-init status or wait on completion. Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0 .SH SEE ALSO -Full documentation at: +Full documentation at: diff --git a/doc/module-docs/cc_package_update_upgrade_install/data.yaml b/doc/module-docs/cc_package_update_upgrade_install/data.yaml index 121720ab1bd..9474857f560 100644 --- a/doc/module-docs/cc_package_update_upgrade_install/data.yaml +++ b/doc/module-docs/cc_package_update_upgrade_install/data.yaml @@ -1,13 +1,16 @@ cc_package_update_upgrade_install: description: | This module allows packages to be updated, upgraded or installed during - boot. If any packages are to be installed or an upgrade is to be performed - then the package cache will be updated first. If a package installation or - upgrade requires a reboot, then a reboot can be performed if - ``package_reboot_if_required`` is specified. + boot using any available package manager present on a system such as apt, + pkg, snap, yum or zypper. If any packages are to be installed or an upgrade + is to be performed then the package cache will be updated first. If a + package installation or upgrade requires a reboot, then a reboot can be + performed if ``package_reboot_if_required`` is specified. examples: - comment: | Example 1: file: cc_package_update_upgrade_install/example1.yaml + - comment: "By default, ``package_upgrade: true`` performs upgrades on any installed package manager. To avoid calling ``snap refresh`` in images with snap installed, set snap refresh.hold to ``forever`` will prevent cloud-init's snap interaction during any boot" + file: cc_package_update_upgrade_install/example2.yaml name: Package Update Upgrade Install title: Update, upgrade, and install packages diff --git a/doc/module-docs/cc_package_update_upgrade_install/example2.yaml b/doc/module-docs/cc_package_update_upgrade_install/example2.yaml new file mode 100644 index 00000000000..754712ca384 --- /dev/null +++ b/doc/module-docs/cc_package_update_upgrade_install/example2.yaml @@ -0,0 +1,7 @@ +#cloud-config +package_update: true +package_upgrade: true +snap: + commands: + 00: snap refresh --hold=forever +package_reboot_if_required: true diff --git a/doc/rtd/development/contribute_code.rst b/doc/rtd/development/contribute_code.rst index c6bc399c898..46b6755c5df 100644 --- a/doc/rtd/development/contribute_code.rst +++ b/doc/rtd/development/contribute_code.rst @@ -1,6 +1,17 @@ Contribute to the code ********************** +.. 
toctree:: + :maxdepth: 1 + :hidden: + + testing.rst + integration_tests.rst + module_creation.rst + datasource_creation.rst + dir_layout.rst + feature_flags.rst + For a run-through of the entire process, the following pages will be your best starting point: @@ -17,34 +28,6 @@ Testing Submissions to cloud-init must include testing. Unit testing and integration testing are integral parts of contributing code. -.. toctree:: - :maxdepth: 1 - :hidden: - - testing.rst - integration_tests.rst - -* :doc:`Unit testing overview and design principles` -* :doc:`Integration testing` - -Popular contributions -===================== - -.. toctree:: - :maxdepth: 1 - :hidden: - - module_creation.rst - datasource_creation.rst - -The two most popular contributions we receive are new cloud config -:doc:`modules ` and new -:doc:`datasources `; these pages will provide instructions -on how to create them. - -Note that any new modules should use underscores in any new config options and -not hyphens (e.g. ``new_option`` and *not* ``new-option``). - Code style and design ===================== @@ -54,7 +37,7 @@ We generally adhere to `PEP 8`_, and this is enforced by our use of ``black``, Python support -------------- -Cloud-init upstream currently supports Python 3.6 and above. +Cloud-init upstream currently supports Python 3.8 and above. Cloud-init upstream will stay compatible with a particular Python version for 6 years after release. After 6 years, we will stop testing upstream changes @@ -70,6 +53,8 @@ version changed: * - Cloud-init version - Python version + * - 24.3 + - 3.8+ * - 22.1 - 3.6+ * - 20.3 @@ -84,22 +69,6 @@ The cloud-init codebase uses Python's annotation support for storing type annotations in the style specified by `PEP-484`_ and `PEP-526`_. Their use in the codebase is encouraged. -Other resources -=============== - -.. toctree:: - :maxdepth: 1 - :hidden: - - dir_layout.rst - -* :doc:`Explanation of the directory structure` - -Feature flags -------------- - -.. automodule:: cloudinit.features - :members: .. LINKS: .. include:: ../links.txt diff --git a/doc/rtd/development/feature_flags.rst b/doc/rtd/development/feature_flags.rst new file mode 100644 index 00000000000..c87a7982aab --- /dev/null +++ b/doc/rtd/development/feature_flags.rst @@ -0,0 +1,6 @@ +Feature flags +************* + +.. automodule:: cloudinit.features + :members: + diff --git a/doc/rtd/explanation/analyze.rst b/doc/rtd/explanation/analyze.rst index 3ab9f1b7fd2..04205aec704 100644 --- a/doc/rtd/explanation/analyze.rst +++ b/doc/rtd/explanation/analyze.rst @@ -3,15 +3,18 @@ Performance *********** -The :command:`analyze` subcommand was added to ``cloud-init`` to help analyze -``cloud-init`` boot time performance. It is loosely based on -``systemd-analyze``, where there are four subcommands: +The :command:`analyze` subcommand helps to analyze ``cloud-init`` boot time +performance. It is loosely based on ``systemd-analyze``, where there are four +subcommands: - :command:`blame` - :command:`show` - :command:`dump` - :command:`boot` +The analyze subcommand works by parsing the cloud-init log file for timestamps +associated with specific events. + Usage ===== diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst index ff3b65ebd28..ac1f6193125 100644 --- a/doc/rtd/explanation/boot.rst +++ b/doc/rtd/explanation/boot.rst @@ -48,8 +48,7 @@ Detect A platform identification tool called ``ds-identify`` runs in the first stage. This tool detects which platform the instance is running on. 
 This tool is integrated into the init system to disable cloud-init when no platform is
-found, and enable cloud-init when a valid platform is detected. This stage
-might not be present for every installation of cloud-init.
+found, and enable cloud-init when a valid platform is detected.
 
 .. _boot-Local:
 
@@ -88,10 +87,9 @@ is rendered. This includes clearing of all previous (stale) configuration
 including persistent device naming with old MAC addresses.
 
 This stage must block network bring-up or any stale configuration that might
-have already been applied. Otherwise, that could have negative effects such
-as DHCP hooks or broadcast of an old hostname. It would also put the system
-in an odd state to recover from, as it may then have to restart network
-devices.
+have already been applied. Otherwise, that could have negative effects such as
+broadcast of an old hostname. It would also put the system in an odd state to
+recover from, as it may then have to restart network devices.
 
 ``Cloud-init`` then exits and expects for the continued boot of the operating
 system to bring network configuration up as configured.
@@ -189,95 +187,7 @@ finished, the :command:`cloud-init status --wait` subcommand can help block
 external scripts until ``cloud-init`` is done without having to write your own
 ``systemd`` units dependency chains. See :ref:`cli_status` for more info.
 
-.. _boot-First_boot_determination:
-
-First boot determination
-========================
-
-``Cloud-init`` has to determine whether or not the current boot is the first
-boot of a new instance, so that it applies the appropriate configuration. On
-an instance's first boot, it should run all "per-instance" configuration,
-whereas on a subsequent boot it should run only "per-boot" configuration. This
-section describes how ``cloud-init`` performs this determination, as well as
-why it is necessary.
-
-When it runs, ``cloud-init`` stores a cache of its internal state for use
-across stages and boots.
-
-If this cache is present, then ``cloud-init`` has run on this system
-before [#not-present]_. There are two cases where this could occur. Most
-commonly, the instance has been rebooted, and this is a second/subsequent
-boot. Alternatively, the filesystem has been attached to a *new* instance,
-and this is the instance's first boot. The most obvious case where this
-happens is when an instance is launched from an image captured from a
-launched instance.
-
-By default, ``cloud-init`` attempts to determine which case it is running
-in by checking the instance ID in the cache against the instance ID it
-determines at runtime. If they do not match, then this is an instance's
-first boot; otherwise, it's a subsequent boot. Internally, ``cloud-init``
-refers to this behaviour as ``check``.
-
-This behaviour is required for images captured from launched instances to
-behave correctly, and so is the default that generic cloud images ship with.
-However, there are cases where it can cause problems [#problems]_. For these
-cases, ``cloud-init`` has support for modifying its behaviour to trust the
-instance ID that is present in the system unconditionally. This means that
-``cloud-init`` will never detect a new instance when the cache is present,
-and it follows that the only way to cause ``cloud-init`` to detect a new
-instance (and therefore its first boot) is to manually remove
-``cloud-init``'s cache. Internally, this behaviour is referred to as
-``trust``.
-
-To configure which of these behaviours to use, ``cloud-init`` exposes the
-``manual_cache_clean`` configuration option. When ``false`` (the default),
-``cloud-init`` will ``check`` and clean the cache if the instance IDs do
-not match (this is the default, as discussed above). When ``true``,
-``cloud-init`` will ``trust`` the existing cache (and therefore not clean it).
-
-Manual cache cleaning
-=====================
-
-``Cloud-init`` ships a command for manually cleaning the cache:
-:command:`cloud-init clean`. See :ref:`cli_clean`'s documentation for further
-details.
-
-Reverting ``manual_cache_clean`` setting
-----------------------------------------
-
-Currently there is no support for switching an instance that is launched with
-``manual_cache_clean: true`` from ``trust`` behaviour to ``check`` behaviour,
-other than manually cleaning the cache.
-
-.. warning:: If you want to capture an instance that is currently in ``trust``
-   mode as an image for launching other instances, you **must** manually clean
-   the cache. If you do not do so, then instances launched from the captured
-   image will all detect their first boot as a subsequent boot of the captured
-   instance, and will not apply any per-instance configuration.
-
-   This is a functional issue, but also a potential security one:
-   ``cloud-init`` is responsible for rotating SSH host keys on first boot,
-   and this will not happen on these instances.
-
-.. [#not-present] It follows that if this cache is not present,
-   ``cloud-init`` has not run on this system before, so this is
-   unambiguously this instance's first boot.
-
-.. [#problems] A couple of ways in which this strict reliance on the presence
-   of a datasource has been observed to cause problems:
-
-   - If a cloud's metadata service is flaky and ``cloud-init`` cannot
-     obtain the instance ID locally on that platform, ``cloud-init``'s
-     instance ID determination will sometimes fail to determine the current
-     instance ID, which makes it impossible to determine if this is an
-     instance's first or subsequent boot (`#1885527`_).
-   - If ``cloud-init`` is used to provision a physical appliance or device
-     and an attacker can present a datasource to the device with a different
-     instance ID, then ``cloud-init``'s default behaviour will detect this as
-     an instance's first boot and reset the device using the attacker's
-     configuration (this has been observed with the
-     :ref:`NoCloud datasource` in `#1879530`_).
+See the :ref:`first boot documentation <First_boot_determination>` to learn how
+cloud-init decides that a boot is the "first boot".
 
 .. _generator: https://www.freedesktop.org/software/systemd/man/systemd.generator.html
-.. _#1885527: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1885527
-.. _#1879530: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1879530
diff --git a/doc/rtd/explanation/events.rst b/doc/rtd/explanation/events.rst
index 38356d38eb0..4335ae2f2c8 100644
--- a/doc/rtd/explanation/events.rst
+++ b/doc/rtd/explanation/events.rst
@@ -66,9 +66,10 @@ Hotplug
 =======
 
 When the ``hotplug`` event is supported by the datasource and configured in
-user data, ``cloud-init`` will respond to the addition or removal of network
-interfaces to the system. In addition to fetching and updating the system
-metadata, ``cloud-init`` will also bring up/down the newly added interface.
+:ref:`user data`, ``cloud-init`` will respond to the
+addition or removal of network interfaces to the system. In addition to
+fetching and updating the system metadata, ``cloud-init`` will also bring
+up/down the newly added interface.
 
 .. warning::
    Due to its use of ``systemd`` sockets, ``hotplug`` functionality is
diff --git a/doc/rtd/explanation/first_boot.rst b/doc/rtd/explanation/first_boot.rst
new file mode 100644
index 00000000000..2348e6e2c0a
--- /dev/null
+++ b/doc/rtd/explanation/first_boot.rst
@@ -0,0 +1,91 @@
+.. _First_boot_determination:
+
+First boot determination
+========================
+
+``Cloud-init`` has to determine whether or not the current boot is the first
+boot of a new instance, so that it applies the appropriate configuration. On
+an instance's first boot, it should run all "per-instance" configuration,
+whereas on a subsequent boot it should run only "per-boot" configuration. This
+section describes how ``cloud-init`` performs this determination, as well as
+why it is necessary.
+
+When it runs, ``cloud-init`` stores a cache of its internal state for use
+across stages and boots.
+
+If this cache is present, then ``cloud-init`` has run on this system
+before [#not-present]_. There are two cases where this could occur. Most
+commonly, the instance has been rebooted, and this is a second/subsequent
+boot. Alternatively, the filesystem has been attached to a *new* instance,
+and this is the instance's first boot. The most obvious case where this
+happens is when an instance is launched from an image captured from a
+launched instance.
+
+By default, ``cloud-init`` attempts to determine which case it is running
+in by checking the instance ID in the cache against the instance ID it
+determines at runtime. If they do not match, then this is an instance's
+first boot; otherwise, it's a subsequent boot. Internally, ``cloud-init``
+refers to this behaviour as ``check``.
+
+This behaviour is required for images captured from launched instances to
+behave correctly, and so is the default that generic cloud images ship with.
+However, there are cases where it can cause problems [#problems]_. For these
+cases, ``cloud-init`` has support for modifying its behaviour to trust the
+instance ID that is present in the system unconditionally. This means that
+``cloud-init`` will never detect a new instance when the cache is present,
+and it follows that the only way to cause ``cloud-init`` to detect a new
+instance (and therefore its first boot) is to manually remove
+``cloud-init``'s cache. Internally, this behaviour is referred to as
+``trust``.
+
+To configure which of these behaviours to use, ``cloud-init`` exposes the
+``manual_cache_clean`` configuration option. When ``false`` (the default),
+``cloud-init`` will ``check`` and clean the cache if the instance IDs do
+not match (this is the default, as discussed above). When ``true``,
+``cloud-init`` will ``trust`` the existing cache (and therefore not clean it).
+
+Manual cache cleaning
+=====================
+
+``Cloud-init`` ships a command for manually cleaning the cache:
+:command:`cloud-init clean`. See :ref:`cli_clean`'s documentation for further
+details.
+
+Reverting ``manual_cache_clean`` setting
+----------------------------------------
+
+Currently there is no support for switching an instance that is launched with
+``manual_cache_clean: true`` from ``trust`` behaviour to ``check`` behaviour,
+other than manually cleaning the cache.
+
+.. warning:: If you want to capture an instance that is currently in ``trust``
+   mode as an image for launching other instances, you **must** manually clean
+   the cache. If you do not do so, then instances launched from the captured
+   image will all detect their first boot as a subsequent boot of the captured
+   instance, and will not apply any per-instance configuration.
+
+   This is a functional issue, but also a potential security one:
+   ``cloud-init`` is responsible for rotating SSH host keys on first boot,
+   and this will not happen on these instances.
+
+.. [#not-present] It follows that if this cache is not present,
+   ``cloud-init`` has not run on this system before, so this is
+   unambiguously this instance's first boot.
+
+.. [#problems] A couple of ways in which this strict reliance on the presence
+   of a datasource has been observed to cause problems:
+
+   - If a cloud's metadata service is flaky and ``cloud-init`` cannot
+     obtain the instance ID locally on that platform, ``cloud-init``'s
+     instance ID determination will sometimes fail to determine the current
+     instance ID, which makes it impossible to determine if this is an
+     instance's first or subsequent boot (`#1885527`_).
+   - If ``cloud-init`` is used to provision a physical appliance or device
+     and an attacker can present a datasource to the device with a different
+     instance ID, then ``cloud-init``'s default behaviour will detect this as
+     an instance's first boot and reset the device using the attacker's
+     configuration (this has been observed with the
+     :ref:`NoCloud datasource` in `#1879530`_).
+
+.. _#1885527: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1885527
+.. _#1879530: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1879530
diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst
index bed2b61af11..7d8a4a2176c 100644
--- a/doc/rtd/explanation/format.rst
+++ b/doc/rtd/explanation/format.rst
@@ -5,7 +5,9 @@ User data formats
 
 User data is configuration data provided by a user of a cloud platform to an
 instance at launch. User data can be passed to cloud-init in any of many
-formats documented here.
+formats documented here. User data is combined with the other
+:ref:`configuration sources` to create a combined configuration
+which modifies an instance.
 
 Configuration types
 ===================
@@ -385,6 +387,11 @@ as binary data and so may be processed automatically.
 |Part handler        |#part-handler                |text/part-handler        |
 +--------------------+-----------------------------+-------------------------+
 
+Continued reading
+=================
+
+See the :ref:`configuration sources` documentation for
+information about other sources of configuration for cloud-init.
 
 .. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
 .. _YAML: https://yaml.org/spec/1.1/current.html
diff --git a/doc/rtd/explanation/index.rst b/doc/rtd/explanation/index.rst
index 503c7098a00..8a1adc4639e 100644
--- a/doc/rtd/explanation/index.rst
+++ b/doc/rtd/explanation/index.rst
@@ -11,9 +11,10 @@ knowledge and become better at using and configuring ``cloud-init``.
    :maxdepth: 1
 
    introduction.rst
+   format.rst
    configuration.rst
    boot.rst
-   format.rst
+   first_boot.rst
   events.rst
    instancedata.rst
    vendordata.rst
diff --git a/doc/rtd/explanation/instancedata.rst b/doc/rtd/explanation/instancedata.rst
index d2aadc083ee..1196fcb3793 100644
--- a/doc/rtd/explanation/instancedata.rst
+++ b/doc/rtd/explanation/instancedata.rst
@@ -63,10 +63,10 @@ provided to this instance.
Non-root users referencing ``userdata`` or Using ``instance-data`` ======================= -``instance-data`` can be used in: +``instance-data`` can be used in the following configuration types: * :ref:`User data scripts`. -* :ref:`Cloud-config data`. +* :ref:`Cloud-config`. * :ref:`Base configuration`. * Command line interface via :command:`cloud-init query` or :command:`cloud-init devel render`. diff --git a/doc/rtd/explanation/introduction.rst b/doc/rtd/explanation/introduction.rst index ce7f9da7706..d14fe19c518 100644 --- a/doc/rtd/explanation/introduction.rst +++ b/doc/rtd/explanation/introduction.rst @@ -113,6 +113,6 @@ and how it works, you will probably want to You can also read in more detail about what cloud-init does :ref:`during the different boot stages`, and the -:ref:`types of configuration` you can pass to cloud-init and +:ref:`types of configuration` you can pass to cloud-init and how they're used. diff --git a/doc/rtd/explanation/kernel-command-line.rst b/doc/rtd/explanation/kernel-command-line.rst index 501812b8c75..c7f861a69ed 100644 --- a/doc/rtd/explanation/kernel-command-line.rst +++ b/doc/rtd/explanation/kernel-command-line.rst @@ -2,18 +2,12 @@ Kernel command line ******************* Providing configuration data via the kernel command line is somewhat of a last -resort, since this method only supports -:ref:`cloud config` starting with -`#cloud-config`, and many datasources do not support injecting kernel -command line arguments without modifying the bootloader. - -Despite the limitations of using the kernel command line, cloud-init supports -some use-cases. +resort, since many datasources do not support injecting kernel command line +arguments without modifying the bootloader. Note that this page describes kernel command line behavior that applies -to all clouds. To provide a local configuration with an image using kernel -command line, see :ref:`datasource NoCloud` which provides -more configuration options. +to all clouds. The :ref:`NoCloud datasource` provides more +configuration options. .. _kernel_datasource_override: diff --git a/doc/rtd/explanation/vendordata.rst b/doc/rtd/explanation/vendordata.rst index a2340c2fab9..0e5e1881694 100644 --- a/doc/rtd/explanation/vendordata.rst +++ b/doc/rtd/explanation/vendordata.rst @@ -29,9 +29,10 @@ Input formats ============= ``Cloud-init`` will download and cache to filesystem any vendor data that it -finds. Vendor data is handled exactly like user data. This means that the -vendor can supply multi-part input and have those parts acted on in the same -way as with user data. +finds. Vendor data is handled exactly like +:ref:`user data`. This means that the vendor can supply +multi-part input and have those parts acted on in the same way as with user +data. The only differences are: diff --git a/doc/rtd/howto/rerun_cloud_init.rst b/doc/rtd/howto/rerun_cloud_init.rst index b7adb30ff30..9af4d19e3ce 100644 --- a/doc/rtd/howto/rerun_cloud_init.rst +++ b/doc/rtd/howto/rerun_cloud_init.rst @@ -64,33 +64,31 @@ a result. .. _partially_rerun_cloud_init: -How to partially re-run cloud-init -================================== - -If the behavior you are testing runs on every boot, there are a couple -of ways to test this behavior. - Manually run cloud-init stages ------------------------------ -Note that during normal boot of cloud-init, the init system runs these -stages at specific points during boot. 
This means that running the code
-manually after booting the system may cause the code to interact with
-the system in a different way than it does while it boots.
+During normal boot of cloud-init, the init system runs the following
+command:
 
 .. code-block:: shell-session
 
-   cloud-init init --local
-   cloud-init init
-   cloud-init modules --mode=config
-   cloud-init modules --mode=final
+   cloud-init --all-stages
+
+Keep in mind that running this manually may not behave the same as cloud-init
+behaves when it is started by the init system. The first reason for this is
+that cloud-init's stages are intended to run before and after specific events
+in the boot order, so there are no guarantees that it will do the right thing
+when running out of order. The second reason is that cloud-init will skip its
+normal synchronization protocol when it detects that stdin is a tty, for the
+purpose of debugging and development.
+
+This command cannot be expected to be stable when executed outside of the init
+system due to its ordering requirements.
 
 Reboot the instance
 -------------------
 
-Rebooting the instance will take a little bit longer, however it will
-make cloud-init stages run at the correct times during boot, so it will
-behave more correctly.
+Rebooting the instance will re-run any parts of cloud-init that run per-boot.
 
 .. code-block:: shell-session
 
diff --git a/doc/rtd/reference/breaking_changes.rst b/doc/rtd/reference/breaking_changes.rst
index 0df6fcfde58..0eba4431f0d 100644
--- a/doc/rtd/reference/breaking_changes.rst
+++ b/doc/rtd/reference/breaking_changes.rst
@@ -11,6 +11,45 @@ releases.
     many operating system vendors patch out breaking changes in cloud-init
     to ensure consistent behavior on their platform.
 
+24.3
+====
+
+Single Process Optimization
+---------------------------
+
+As a performance optimization, cloud-init no longer runs as four separate
+Python processes. Instead, it launches a single process and then
+communicates with the init system over a Unix socket to allow the init system
+to tell it when it should start each stage and to tell the init system when
+each stage has completed. Init system ordering is preserved.
+
+This should have no noticeable effect for end users, besides a faster boot
+time. This is a breaking change for two reasons:
+
+1. as a precaution, to avoid unintentionally breaking users on stable distributions
+2. this change included renaming a systemd service:
+   ``cloud-init.service`` -> ``cloud-init-network.service``
+
+The now-deprecated command line arguments used to invoke each stage will still
+be supported for a period of time to allow for adoption and stabilization. Any
+systemd distribution that wants to revert this behavior may want to
+`patch this change`_.
+
+Support has not yet been added for non-systemd distributions; however, it is
+possible to add it.
+
+Note that this change adds a dependency on the OpenBSD netcat implementation,
+which is already present on Ubuntu as part of ``ubuntu-minimal``.
+
+Addition of NoCloud network-config
+----------------------------------
+
+The NoCloud datasource now has support for providing network configuration
+using network-config. Any installation that doesn't provide this configuration
+file will experience a retry/timeout in boot. Adding an empty
+``network-config`` file should provide backwards compatibility with previous
+behavior.
+
 24.1
 ====
 
@@ -96,3 +135,6 @@ behavior as a result of this change.
Workarounds include updating the kernel command line and optionally
 configuring a ``datasource_list`` in ``/etc/cloud/cloud.cfg.d/*.cfg``.
+
+
+.. _patch this change: https://github.com/canonical/cloud-init/blob/ubuntu/noble/debian/patches/no-single-process.patch
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst
index eb800b22a75..bdc59c2808a 100644
--- a/doc/rtd/reference/cli.rst
+++ b/doc/rtd/reference/cli.rst
@@ -15,20 +15,20 @@ Example output:
 
 .. code-block::
 
-   usage: cloud-init [-h] [--version] [--debug] [--force]
-                     {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema} ...
+   usage: cloud-init [-h] [--version] [--debug] [--force] [--all-stages] {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema} ...
 
    options:
      -h, --help show this help message and exit
      --version, -v Show program's version number and exit.
      --debug, -d Show additional pre-action logging (default: False).
      --force Force running even if no datasource is found (use at your own risk).
+     --all-stages Run cloud-init's stages under a single process using a synchronization protocol. This is not intended for CLI usage.
 
    Subcommands:
      {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema}
-       init Initialize cloud-init and perform initial modules.
-       modules Activate modules using a given configuration key.
-       single Run a single module.
+       init DEPRECATED: Initialize cloud-init and perform initial modules.
+       modules DEPRECATED: Activate modules using a given configuration key.
+       single Manually run a single module. Useful for testing during development.
        query Query standardized instance metadata from the command line.
        features List defined features.
        analyze Devel tool: Analyze cloud-init logs and data.
@@ -185,8 +185,8 @@ Example output:
 
 .. _cli_init:
 
-:command:`init`
-===============
+:command:`init` (deprecated)
+============================
 
 Generally run by OS init systems to execute ``cloud-init``'s stages:
 *init* and *init-local*. See :ref:`boot_stages` for more info.
@@ -200,8 +200,8 @@ generally gated to run only once due to semaphores in
 
 .. _cli_modules:
 
-:command:`modules`
-==================
+:command:`modules` (deprecated)
+===============================
 
 Generally run by OS init systems to execute ``modules:config`` and
 ``modules:final`` boot stages. This executes cloud config :ref:`modules`
diff --git a/doc/rtd/reference/datasources/vmware.rst b/doc/rtd/reference/datasources/vmware.rst
index 1d4bbd7fd50..cea24a4a82f 100644
--- a/doc/rtd/reference/datasources/vmware.rst
+++ b/doc/rtd/reference/datasources/vmware.rst
@@ -389,7 +389,7 @@ this datasource using the GuestInfo keys transport:
 
    Otherwise ``cloud-init`` may not run in first-boot mode. For more
    information on how the boot mode is determined, please see the
-   :ref:`First Boot Documentation <boot-First_boot_determination>`.
+   :ref:`first boot documentation <First_boot_determination>`.
 
 ..
raw:: html diff --git a/pyproject.toml b/pyproject.toml index df969290451..2adba376194 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ build-backend = "setuptools.build_meta" line-length = 79 include = '(brpm|bddeb|\.py)$' - [tool.isort] profile = "black" line_length = 79 @@ -32,6 +31,7 @@ module = [ "paramiko.*", "pip.*", "pycloudlib.*", + "responses", "serial", "tests.integration_tests.user_settings", "uaclient.*", @@ -69,7 +69,6 @@ module = [ "cloudinit.distros.ubuntu", "cloudinit.distros.ug_util", "cloudinit.helpers", - "cloudinit.log", "cloudinit.mergers", "cloudinit.net", "cloudinit.net.cmdline", diff --git a/requirements.txt b/requirements.txt index eabd7a22cd8..3c6bf49e432 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,8 +3,8 @@ # Used for untemplating any files or strings with parameters. jinja2 -# This one is currently only used by the MAAS datasource. If that -# datasource is removed, this is no longer needed +# This one is currently only used by the MAAS datasource and the Event +# reporting feature when configured to use webhooks. oauthlib # This one is currently used only by the CloudSigma and SmartOS datasources. diff --git a/setup.py b/setup.py index 3e33d0062bd..9ca4a8a2a40 100644 --- a/setup.py +++ b/setup.py @@ -312,7 +312,7 @@ def finalize_options(self): [ (RULES_PATH + "/rules.d", [f for f in glob("udev/*.rules")]), ( - ETC + "/systemd/system/sshd-keygen@.service.d/", + INITSYS_ROOTS["systemd"] + "/sshd-keygen@.service.d/", ["systemd/disable-sshd-keygen-if-cloud-init-active.conf"], ), ] diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py index 204161e2c2e..a1f3b97e13c 100644 --- a/tests/integration_tests/bugs/test_gh626.py +++ b/tests/integration_tests/bugs/test_gh626.py @@ -3,6 +3,7 @@ Ensure if wakeonlan is specified in the network config that it is rendered in the /etc/network/interfaces or netplan config. """ + import pytest import yaml diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py index 9e67fe593a4..bd26e6b39d2 100644 --- a/tests/integration_tests/bugs/test_gh632.py +++ b/tests/integration_tests/bugs/test_gh632.py @@ -3,6 +3,7 @@ Verify that if cloud-init is using DataSourceRbxCloud, there is no traceback if the metadata disk cannot be found. 
""" + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py index 67ac9b3a6af..7c0af6b2ff8 100644 --- a/tests/integration_tests/bugs/test_gh868.py +++ b/tests/integration_tests/bugs/test_gh868.py @@ -1,4 +1,5 @@ """Ensure no Traceback when 'chef_license' is set""" + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py index b2d1b1bd866..f44edca80e3 100644 --- a/tests/integration_tests/bugs/test_lp1835584.py +++ b/tests/integration_tests/bugs/test_lp1835584.py @@ -25,6 +25,7 @@ https://bugs.launchpad.net/cloud-init/+bug/1835584 """ + import re import pytest diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py index d56ca320e2b..d170a133d35 100644 --- a/tests/integration_tests/bugs/test_lp1886531.py +++ b/tests/integration_tests/bugs/test_lp1886531.py @@ -9,6 +9,7 @@ https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1886531 """ + import pytest from tests.integration_tests.util import verify_clean_log diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py index 631285955d7..d183223b9ac 100644 --- a/tests/integration_tests/bugs/test_lp1898997.py +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -9,6 +9,7 @@ network configuration, and confirms that the bridge can be used to ping the default gateway. """ + import pytest from tests.integration_tests import random_mac_address diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py index e94caf9b520..4a25c602c14 100644 --- a/tests/integration_tests/bugs/test_lp1901011.py +++ b/tests/integration_tests/bugs/test_lp1901011.py @@ -4,6 +4,7 @@ See https://github.com/canonical/cloud-init/pull/800 """ + import pytest from tests.integration_tests.clouds import IntegrationCloud diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py index aa0fb75c27b..ff8390f7e0a 100644 --- a/tests/integration_tests/bugs/test_lp1910835.py +++ b/tests/integration_tests/bugs/test_lp1910835.py @@ -17,6 +17,7 @@ material: if the Azure datasource has removed the CRLFs correctly, then they will match. """ + import pytest from tests.integration_tests.integration_settings import PLATFORM diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py index b5aafa76797..15fcf81635f 100644 --- a/tests/integration_tests/bugs/test_lp1912844.py +++ b/tests/integration_tests/bugs/test_lp1912844.py @@ -14,6 +14,7 @@ the traceback that they cause. We work around this by calling ``get_interfaces_by_mac` directly in the test code. 
""" + import pytest from tests.integration_tests import random_mac_address diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py index c954484012a..b019e4c2f4f 100644 --- a/tests/integration_tests/cmd/test_schema.py +++ b/tests/integration_tests/cmd/test_schema.py @@ -1,4 +1,5 @@ """Tests for `cloud-init status`""" + from textwrap import dedent import pytest diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py index 50396be709c..fe9946b06a0 100644 --- a/tests/integration_tests/cmd/test_status.py +++ b/tests/integration_tests/cmd/test_status.py @@ -1,4 +1,5 @@ """Tests for `cloud-init status`""" + import json import pytest diff --git a/tests/integration_tests/datasources/test_none.py b/tests/integration_tests/datasources/test_none.py index 6d7216e3dca..d79c30404d8 100644 --- a/tests/integration_tests/datasources/test_none.py +++ b/tests/integration_tests/datasources/test_none.py @@ -1,4 +1,5 @@ """DataSourceNone integration tests on LXD.""" + import json from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py index 03b362302f0..352dad164ce 100644 --- a/tests/integration_tests/modules/test_ca_certs.py +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -6,6 +6,7 @@ * Mark this as running on Debian and Alpine (once we have marks for that) * Implement testing for the RHEL-specific paths """ + import os.path import pytest diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py index 96525cac0c4..5179b11150d 100644 --- a/tests/integration_tests/modules/test_command_output.py +++ b/tests/integration_tests/modules/test_command_output.py @@ -4,6 +4,7 @@ (This is ported from ``tests/cloud_tests/testcases/main/command_output_simple.yaml``.)""" + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py index a6d644396f4..70f5c1c7007 100644 --- a/tests/integration_tests/modules/test_keys_to_console.py +++ b/tests/integration_tests/modules/test_keys_to_console.py @@ -2,6 +2,7 @@ (This is ported from ``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)""" + import pytest from tests.integration_tests.decorators import retry diff --git a/tests/integration_tests/modules/test_lxd.py b/tests/integration_tests/modules/test_lxd.py index 308ef185357..a4ff5906a23 100644 --- a/tests/integration_tests/modules/test_lxd.py +++ b/tests/integration_tests/modules/test_lxd.py @@ -3,6 +3,7 @@ (This is ported from ``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.) 
""" + import warnings import pytest diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py index fc62e63b346..217140c5268 100644 --- a/tests/integration_tests/modules/test_ntp_servers.py +++ b/tests/integration_tests/modules/test_ntp_servers.py @@ -7,6 +7,7 @@ ``tests/cloud_tests/testcases/modules/ntp_pools.yaml``, and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``) """ + import re import pytest diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py index b4c2d3dd102..7da54054263 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -86,6 +86,40 @@ def test_snap_packages_are_installed(self, class_client): assert "curl" in output assert "postman" in output + def test_snap_refresh_not_called_when_refresh_hold_forever( + self, class_client + ): + """Assert snap refresh is not called when snap refresh --hold is set. + + Certain network-limited or secure environments may opt to avoid + contacting snap API endpoints. In those scenarios, it is expected + that automated snap refresh is held for all snaps. Typically, this is + done with snap refresh --hold in those environments. + + Assert cloud-init does not attempt to call snap refresh when + refresh.hold is forever. + """ + assert class_client.execute( + [ + "grep", + r"Running command \['snap', 'refresh'", + "/var/log/cloud-init.log", + ] + ).ok + assert class_client.execute("snap refresh --hold").ok + class_client.instance.clean() + class_client.restart() + assert class_client.execute( + [ + "grep", + r"Running command \['snap', 'refresh']", + "/var/log/cloud-init.log", + ] + ).failed + assert class_client.execute( + "grep 'Skipping snap refresh' /var/log/cloud-init.log" + ).ok + HELLO_VERSIONS_BY_RELEASE = { "oracular": "2.10-3build2", diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py index 796f316a711..9598b8ec971 100644 --- a/tests/integration_tests/modules/test_puppet.py +++ b/tests/integration_tests/modules/test_puppet.py @@ -1,4 +1,5 @@ """Test installation configuration of puppet module.""" + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py index 158eb880e09..f13672a6c47 100644 --- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py @@ -8,6 +8,7 @@ ``tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml``, ``tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml``. )""" + import re import pytest diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py index f1352f860cf..a904cd9f6f2 100644 --- a/tests/integration_tests/modules/test_users_groups.py +++ b/tests/integration_tests/modules/test_users_groups.py @@ -4,6 +4,7 @@ * This module assumes that the "ubuntu" user will be created when "default" is specified; this will need modification to run on other OSes. 
""" + import re import pytest diff --git a/tests/integration_tests/modules/test_wireguard.py b/tests/integration_tests/modules/test_wireguard.py index e685a269cbe..9ff1a21b2dc 100644 --- a/tests/integration_tests/modules/test_wireguard.py +++ b/tests/integration_tests/modules/test_wireguard.py @@ -1,4 +1,5 @@ """Integration test for the wireguard module.""" + import pytest from pycloudlib.lxd.instance import LXDInstance diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py index ad3651ad7b9..c1ede2bc574 100644 --- a/tests/unittests/config/test_cc_package_update_upgrade_install.py +++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py @@ -122,7 +122,7 @@ def _isfile(filename: str): caplog.set_level(logging.WARNING) with mock.patch( - "cloudinit.subp.subp", return_value=("fakeout", "fakeerr") + "cloudinit.subp.subp", return_value=SubpResult("{}", "fakeerr") ) as m_subp: with mock.patch("os.path.isfile", side_effect=_isfile): with mock.patch(M_PATH + "time.sleep") as m_sleep: diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py index 36daf9f8952..5a3057f40a2 100644 --- a/tests/unittests/config/test_cc_resizefs.py +++ b/tests/unittests/config/test_cc_resizefs.py @@ -22,7 +22,7 @@ get_schema, validate_cloudconfig_schema, ) -from cloudinit.subp import ProcessExecutionError +from cloudinit.subp import ProcessExecutionError, SubpResult from tests.unittests.helpers import ( CiTestCase, mock, @@ -62,7 +62,7 @@ def test_cannot_skip_ufs_resize(self, m_subp): fs_type = "ufs" resize_what = "/" devpth = "/dev/da0p2" - m_subp.return_value = ( + m_subp.return_value = SubpResult( "stdout: super-block backups (for fsck_ffs -b #) at:\n\n", "growfs: no room to allocate last cylinder group; " "leaving 364KB unused\n", @@ -457,7 +457,7 @@ def test_resize_btrfs_mount_is_ro(self, m_subp, m_is_dir, m_is_rw): """Do not resize / directly if it is read-only. (LP: #1734787).""" m_is_rw.return_value = False m_is_dir.return_value = True - m_subp.return_value = ("btrfs-progs v4.19 \n", "") + m_subp.return_value = SubpResult("btrfs-progs v4.19 \n", "") self.assertEqual( ("btrfs", "filesystem", "resize", "max", "//.snapshots"), _resize_btrfs("/", "/dev/sda1"), @@ -470,7 +470,7 @@ def test_resize_btrfs_mount_is_rw(self, m_subp, m_is_dir, m_is_rw): """Do not resize / directly if it is read-only. 
(LP: #1734787).""" m_is_rw.return_value = True m_is_dir.return_value = True - m_subp.return_value = ("btrfs-progs v4.19 \n", "") + m_subp.return_value = SubpResult("btrfs-progs v4.19 \n", "") self.assertEqual( ("btrfs", "filesystem", "resize", "max", "/"), _resize_btrfs("/", "/dev/sda1"), @@ -485,7 +485,24 @@ def test_resize_btrfs_mount_is_rw_has_queue( """Queue the resize request if btrfs >= 5.10""" m_is_rw.return_value = True m_is_dir.return_value = True - m_subp.return_value = ("btrfs-progs v5.10 \n", "") + m_subp.return_value = SubpResult("btrfs-progs v5.10 \n", "") + self.assertEqual( + ("btrfs", "filesystem", "resize", "--enqueue", "max", "/"), + _resize_btrfs("/", "/dev/sda1"), + ) + + @mock.patch("cloudinit.util.mount_is_read_write") + @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir") + @mock.patch("cloudinit.subp.subp") + def test_resize_btrfs_version(self, m_subp, m_is_dir, m_is_rw): + """Queue the resize request if btrfs >= 6.10""" + m_is_rw.return_value = True + m_is_dir.return_value = True + m_subp.return_value = SubpResult( + "btrfs-progs v6.10 \n\n-EXPERIMENTAL -INJECT -STATIC +LZO +ZSTD " + "+UDEV +FSVERITY +ZONED CRYPTO=libgcrypt", + "", + ) self.assertEqual( ("btrfs", "filesystem", "resize", "--enqueue", "max", "/"), _resize_btrfs("/", "/dev/sda1"), @@ -555,12 +572,12 @@ def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os): @mock.patch(M_PATH + "os") @mock.patch("cloudinit.subp.subp") - def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os): + def test_get_device_info_from_zpool_on_error(self, m_subp, m_os): # mock /dev/zfs exists m_os.path.exists.return_value = True # mock subp command from get_mount_info_fs_on_zpool - zpool_output.return_value = ( + m_subp.return_value = SubpResult( readResource("zpool_status_simple.txt"), "error", ) diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py index 76b9b796a65..15c59523466 100644 --- a/tests/unittests/config/test_cc_seed_random.py +++ b/tests/unittests/config/test_cc_seed_random.py @@ -225,7 +225,7 @@ def test_file_in_environment_for_command(self): def apply_patches(patches): ret = [] - for (ref, name, replace) in patches: + for ref, name, replace in patches: if replace is None: continue orig = getattr(ref, name) diff --git a/tests/unittests/config/test_cc_ubuntu_pro.py b/tests/unittests/config/test_cc_ubuntu_pro.py index 40f8035b30d..07ba8c69bc8 100644 --- a/tests/unittests/config/test_cc_ubuntu_pro.py +++ b/tests/unittests/config/test_cc_ubuntu_pro.py @@ -450,12 +450,14 @@ class TestUbuntuProSchema: # If __version__ no longer exists on jsonschema, that means # we're using a high enough version of jsonschema to not need # to skip this test. 
- JSONSCHEMA_SKIP_REASON - if lifecycle.Version.from_str( - getattr(jsonschema, "__version__", "999") - ) - < lifecycle.Version(4) - else "", + ( + JSONSCHEMA_SKIP_REASON + if lifecycle.Version.from_str( + getattr(jsonschema, "__version__", "999") + ) + < lifecycle.Version(4) + else "" + ), id="deprecation_of_ubuntu_advantage_skip_old_json", ), # Strict keys @@ -1121,9 +1123,9 @@ def test_should_auto_attach_error(self, caplog, fake_uaclient): m_should_auto_attach.should_auto_attach.side_effect = ( FakeUserFacingError("Some error") # noqa: E501 ) - sys.modules[ - "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" - ] = m_should_auto_attach + sys.modules["uaclient.api.u.pro.attach.auto.should_auto_attach.v1"] = ( + m_should_auto_attach + ) assert not _should_auto_attach({}) assert "Error during `should_auto_attach`: Some error" in caplog.text assert ( @@ -1145,9 +1147,9 @@ def test_happy_path( self, ua_section, expected_result, caplog, fake_uaclient ): m_should_auto_attach = mock.Mock() - sys.modules[ - "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" - ] = m_should_auto_attach + sys.modules["uaclient.api.u.pro.attach.auto.should_auto_attach.v1"] = ( + m_should_auto_attach + ) should_auto_attach_value = object() m_should_auto_attach.should_auto_attach.return_value.should_auto_attach = ( # noqa: E501 should_auto_attach_value @@ -1174,9 +1176,9 @@ def test_full_auto_attach_error(self, caplog, mocker, fake_uaclient): m_full_auto_attach.full_auto_attach.side_effect = FakeUserFacingError( "Some error" ) - sys.modules[ - "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" - ] = m_full_auto_attach + sys.modules["uaclient.api.u.pro.attach.auto.full_auto_attach.v1"] = ( + m_full_auto_attach + ) expected_msg = "Error during `full_auto_attach`: Some error" with pytest.raises(RuntimeError, match=re.escape(expected_msg)): _auto_attach(self.ua_section) @@ -1185,9 +1187,9 @@ def test_full_auto_attach_error(self, caplog, mocker, fake_uaclient): def test_happy_path(self, caplog, mocker, fake_uaclient): mocker.patch.dict("sys.modules") sys.modules["uaclient.config"] = mock.Mock() - sys.modules[ - "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" - ] = mock.Mock() + sys.modules["uaclient.api.u.pro.attach.auto.full_auto_attach.v1"] = ( + mock.Mock() + ) _auto_attach(self.ua_section) assert "Attaching to Ubuntu Pro took" in caplog.text diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py index ec0024971ad..7f7f1740112 100644 --- a/tests/unittests/config/test_cc_write_files.py +++ b/tests/unittests/config/test_cc_write_files.py @@ -138,13 +138,14 @@ def test_all_decodings(self): b64 = (base64.b64encode(data), b64_aliases) for content, aliases in (gz, gz_b64, b64): for enc in aliases: + path = "/tmp/file-%s-%s" % (name, enc) cur = { "content": content, - "path": "/tmp/file-%s-%s" % (name, enc), + "path": path, "encoding": enc, } files.append(cur) - expected.append((cur["path"], data)) + expected.append((path, data)) write_files("test_decoding", files, self.owner) diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index 9401f2235ef..375e1d14840 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -85,7 +85,7 @@ def fake_filesystem(mocker, tmpdir): # exists, but then it fails because of the retargeting that happens here. 
    tmpdir.mkdir("tmp")
 
-    for (mod, funcs) in FS_FUNCS.items():
+    for mod, funcs in FS_FUNCS.items():
         for f, nargs in funcs:
             func = getattr(mod, f)
             trap_func = retarget_many_wrapper(str(tmpdir), nargs, func)
diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py
index d0c64a24a39..b447757bef6 100644
--- a/tests/unittests/distros/test_netconfig.py
+++ b/tests/unittests/distros/test_netconfig.py
@@ -313,11 +313,11 @@ def assertCfgEquals(self, blob1, blob2):
         b1 = dict(SysConf(blob1.strip().splitlines()))
         b2 = dict(SysConf(blob2.strip().splitlines()))
         self.assertEqual(b1, b2)
-        for (k, v) in b1.items():
+        for k, v in b1.items():
             self.assertIn(k, b2)
-        for (k, v) in b2.items():
+        for k, v in b2.items():
             self.assertIn(k, b1)
-        for (k, v) in b1.items():
+        for k, v in b1.items():
             self.assertEqual(v, b2[k])
 
diff --git a/tests/unittests/distros/test_ubuntu.py b/tests/unittests/distros/test_ubuntu.py
index 39be1b2efaf..2391447e2af 100644
--- a/tests/unittests/distros/test_ubuntu.py
+++ b/tests/unittests/distros/test_ubuntu.py
@@ -1,7 +1,10 @@
 # This file is part of cloud-init. See LICENSE file for license information.
+import logging
+
 import pytest
 
 from cloudinit.distros import fetch
+from cloudinit.subp import SubpResult
 
 
 class TestPackageCommand:
@@ -14,7 +17,7 @@ def test_package_command_only_refresh_snap_when_available(
             "cloudinit.distros.ubuntu.Snap.available",
             return_value=snap_available,
         )
-        m_snap_upgrade_packges = mocker.patch(
+        m_snap_upgrade_packages = mocker.patch(
             "cloudinit.distros.ubuntu.Snap.upgrade_packages",
             return_value=snap_available,
         )
@@ -27,6 +30,81 @@ def test_package_command_only_refresh_snap_when_available(
         m_apt_run_package_command.assert_called_once_with("upgrade")
         m_snap_available.assert_called_once()
         if snap_available:
-            m_snap_upgrade_packges.assert_called_once()
+            m_snap_upgrade_packages.assert_called_once()
+        else:
+            m_snap_upgrade_packages.assert_not_called()
+
+    @pytest.mark.parametrize(
+        "subp_side_effect,expected_log",
+        (
+            pytest.param(
+                [
+                    SubpResult(
+                        stdout='{"refresh": {"hold": "forever"}}', stderr=None
+                    )
+                ],
+                "Skipping snap refresh because refresh.hold is set to"
+                " 'forever'",
+                id="skip_snap_refresh_due_to_global_hold_forever",
+            ),
+            pytest.param(
+                [
+                    SubpResult(
+                        stdout=(
+                            '{"refresh": {"hold":'
+                            ' "2024-07-08T15:38:20-06:00"}}'
+                        ),
+                        stderr=None,
+                    ),
+                    SubpResult(stdout="All snaps up to date.", stderr=""),
+                ],
+                "",
+                id="perform_snap_refresh_due_to_temporary_global_hold",
+            ),
+            pytest.param(
+                [
+                    SubpResult(
+                        stdout="{}",
+                        stderr=(
+                            'error: snap "core" has no "refresh.hold" '
+                            "configuration option"
+                        ),
+                    ),
+                    SubpResult(stdout="All snaps up to date.", stderr=""),
+                ],
+                "",
+                id="snap_refresh_performed_when_no_global_hold_is_set",
+            ),
+        ),
+    )
+    def test_package_command_avoids_snap_refresh_when_refresh_hold_is_forever(
+        self, subp_side_effect, expected_log, caplog, mocker
+    ):
+        """Do not call snap refresh when snap refresh.hold is forever.
+
+        This indicates an environment where snap refreshes are not preferred
+        for whatever reason.
+ """ + m_snap_available = mocker.patch( + "cloudinit.distros.ubuntu.Snap.available", + return_value=True, + ) + m_subp = mocker.patch( + "cloudinit.subp.subp", + side_effect=subp_side_effect, + ) + m_apt_run_package_command = mocker.patch( + "cloudinit.distros.package_management.apt.Apt.run_package_command", + ) + cls = fetch("ubuntu") + distro = cls("ubuntu", {}, None) + with caplog.at_level(logging.INFO): + distro.package_command("upgrade") + m_apt_run_package_command.assert_called_once_with("upgrade") + m_snap_available.assert_called_once() + expected_calls = [mocker.call(["snap", "get", "system", "-d"])] + if expected_log: + assert expected_log in caplog.text else: - m_snap_upgrade_packges.assert_not_called() + expected_calls.append(mocker.call(["snap", "refresh"])) + assert m_subp.call_args_list == expected_calls diff --git a/tests/unittests/distros/test_user_data_normalize.py b/tests/unittests/distros/test_user_data_normalize.py index 8fd187b4754..3ff2c49fb14 100644 --- a/tests/unittests/distros/test_user_data_normalize.py +++ b/tests/unittests/distros/test_user_data_normalize.py @@ -302,7 +302,7 @@ def test_create_snap_user(self, mock_subp): ], } users, _groups = self._norm(ug_cfg, distro) - for (user, config) in users.items(): + for user, config in users.items(): print("user=%s config=%s" % (user, config)) username = distro.create_user(user, **config) @@ -322,7 +322,7 @@ def test_create_snap_user_known(self, mock_subp): ], } users, _groups = self._norm(ug_cfg, distro) - for (user, config) in users.items(): + for user, config in users.items(): print("user=%s config=%s" % (user, config)) username = distro.create_user(user, **config) @@ -353,7 +353,7 @@ def test_add_user_on_snappy_system( ], } users, _groups = self._norm(ug_cfg, distro) - for (user, config) in users.items(): + for user, config in users.items(): print("user=%s config=%s" % (user, config)) distro.add_user(user, **config) diff --git a/tests/unittests/filters/test_launch_index.py b/tests/unittests/filters/test_launch_index.py index 071cc66f563..1b2ebfb2e20 100644 --- a/tests/unittests/filters/test_launch_index.py +++ b/tests/unittests/filters/test_launch_index.py @@ -21,7 +21,7 @@ def count_messages(root): class TestLaunchFilter(helpers.ResourceUsingTestCase): def assertCounts(self, message, expected_counts): orig_message = copy.deepcopy(message) - for (index, count) in expected_counts.items(): + for index, count in expected_counts.items(): index = util.safe_int(index) filtered_message = launch_index.Filter(index).apply(message) self.assertEqual(count_messages(filtered_message), count) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index ab97973e02a..dfd9a508c25 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -313,7 +313,7 @@ def tearDown(self): def replicateTestRoot(self, example_root, target_root): real_root = resourceLocation() real_root = os.path.join(real_root, "roots", example_root) - for (dir_path, _dirnames, filenames) in os.walk(real_root): + for dir_path, _dirnames, filenames in os.walk(real_root): real_path = dir_path make_path = rebase_path(real_path[len(real_root) :], target_root) util.ensure_dir(make_path) @@ -340,8 +340,8 @@ def patchUtils(self, new_root): ("write_json", 1), ], } - for (mod, funcs) in patch_funcs.items(): - for (f, am) in funcs: + for mod, funcs in patch_funcs.items(): + for f, am in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(new_root, am, func) self.patched_funcs.enter_context( @@ -388,7 +388,7 @@ def patchOS(self, 
new_root): # py27 does not have scandir patch_funcs[os].append(("scandir", 1)) - for (mod, funcs) in patch_funcs.items(): + for mod, funcs in patch_funcs.items(): for f, nargs in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(new_root, nargs, func) @@ -511,7 +511,7 @@ def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) ret = [] - for (name, content) in files.items(): + for name, content in files.items(): p = os.path.sep.join([path, name]) util.ensure_dir(os.path.dirname(p)) with open(p, "wb") as fp: @@ -599,11 +599,16 @@ def skipIfAptPkg(): try: + import importlib.metadata + import jsonschema assert jsonschema # avoid pyflakes error F401: import unused _jsonschema_version = tuple( - int(part) for part in jsonschema.__version__.split(".") # type: ignore + int(part) + for part in importlib.metadata.metadata("jsonschema") + .get("Version", "") + .split(".") ) _missing_jsonschema_dep = False except ImportError: diff --git a/tests/unittests/net/network_configs.py b/tests/unittests/net/network_configs.py index 2b55bbf421a..0779c5809a9 100644 --- a/tests/unittests/net/network_configs.py +++ b/tests/unittests/net/network_configs.py @@ -2160,8 +2160,6 @@ [bond] mode=active-backup - miimon=100 - xmit_hash_policy=layer3+4 [ipv6] method=auto @@ -3055,13 +3053,13 @@ params: bond-mode: active-backup bond_miimon: 100 - bond-xmit-hash-policy: "layer3+4" - bond-num-grat-arp: 5 + bond-xmit_hash_policy: "layer3+4" + bond-num_grat_arp: 5 bond-downdelay: 10 bond-updelay: 20 - bond-fail-over-mac: active + bond-fail_over_mac: active bond-primary: bond0s0 - bond-primary-reselect: always + bond-primary_reselect: always subnets: - type: static address: 192.168.0.2/24 @@ -3138,27 +3136,27 @@ auto bond0s0 iface bond0s0 inet manual bond-downdelay 10 - bond-fail-over-mac active + bond-fail_over_mac active bond-master bond0 bond-mode active-backup - bond-num-grat-arp 5 + bond-num_grat_arp 5 bond-primary bond0s0 - bond-primary-reselect always + bond-primary_reselect always bond-updelay 20 - bond-xmit-hash-policy layer3+4 + bond-xmit_hash_policy layer3+4 bond_miimon 100 auto bond0s1 iface bond0s1 inet manual bond-downdelay 10 - bond-fail-over-mac active + bond-fail_over_mac active bond-master bond0 bond-mode active-backup - bond-num-grat-arp 5 + bond-num_grat_arp 5 bond-primary bond0s0 - bond-primary-reselect always + bond-primary_reselect always bond-updelay 20 - bond-xmit-hash-policy layer3+4 + bond-xmit_hash_policy layer3+4 bond_miimon 100 auto bond0 @@ -3166,14 +3164,14 @@ address 192.168.0.2/24 gateway 192.168.0.1 bond-downdelay 10 - bond-fail-over-mac active + bond-fail_over_mac active bond-mode active-backup - bond-num-grat-arp 5 + bond-num_grat_arp 5 bond-primary bond0s0 - bond-primary-reselect always + bond-primary_reselect always bond-slaves none bond-updelay 20 - bond-xmit-hash-policy layer3+4 + bond-xmit_hash_policy layer3+4 bond_miimon 100 hwaddress aa:bb:cc:dd:e8:ff mtu 9000 @@ -3199,12 +3197,8 @@ "ifcfg-bond0": textwrap.dedent( """\ BONDING_MASTER=yes - BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" + BONDING_MODULE_OPTS="mode=active-backup miimon=100 """ + """downdelay=10 updelay=20 primary=bond0s0" BONDING_SLAVE_0=bond0s0 BONDING_SLAVE_1=bond0s1 BOOTPROTO=static @@ -3237,12 +3231,8 @@ "ifcfg-bond0": textwrap.dedent( """\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup 
xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" + BONDING_OPTS="mode=active-backup miimon=100 """ + """downdelay=10 updelay=20 primary=bond0s0" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none @@ -3361,7 +3351,6 @@ [bond] mode=active-backup - miimon=100 xmit_hash_policy=layer3+4 num_grat_arp=5 downdelay=10 @@ -3719,8 +3708,6 @@ [bond] mode=active-backup - miimon=100 - xmit_hash_policy=layer3+4 num_grat_arp=5 downdelay=10 updelay=20 diff --git a/tests/unittests/net/test_net_rendering.py b/tests/unittests/net/test_net_rendering.py index 3e1490b2713..0f3c766fdeb 100644 --- a/tests/unittests/net/test_net_rendering.py +++ b/tests/unittests/net/test_net_rendering.py @@ -24,6 +24,7 @@ in `unittests/test_net.py`. While that file contains similar tests, it has become too large to be maintainable. """ + import glob from enum import Flag, auto from pathlib import Path diff --git a/tests/unittests/net/test_network_manager.py b/tests/unittests/net/test_network_manager.py index 2aa476d7d15..4551698daba 100644 --- a/tests/unittests/net/test_network_manager.py +++ b/tests/unittests/net/test_network_manager.py @@ -129,6 +129,16 @@ def test_bond_dns_baseline(self, tmpdir): [bond] mode=802.3ad + miimon=100 + xmit_hash_policy=layer3+4 + + [ipv4] + method=disabled + may-fail=false + + [ipv6] + method=disabled + may-fail=false [ethernet] mtu=9000 @@ -278,6 +288,16 @@ def test_bond_dns_redacted_with_method_disabled(self, tmpdir): [bond] mode=802.3ad + miimon=100 + xmit_hash_policy=layer3+4 + + [ipv4] + method=disabled + may-fail=false + + [ipv6] + method=disabled + may-fail=false [ethernet] mtu=9000 diff --git a/tests/unittests/sources/test_akamai.py b/tests/unittests/sources/test_akamai.py index 2480269f6e6..e0472139037 100644 --- a/tests/unittests/sources/test_akamai.py +++ b/tests/unittests/sources/test_akamai.py @@ -38,9 +38,9 @@ def _get_datasource( return_value="", ): if local: - ds: Union[ - DataSourceAkamai, DataSourceAkamaiLocal - ] = DataSourceAkamaiLocal(sys_cfg, None, None) + ds: Union[DataSourceAkamai, DataSourceAkamaiLocal] = ( + DataSourceAkamaiLocal(sys_cfg, None, None) + ) else: ds = DataSourceAkamai(sys_cfg, None, None) diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py index b98ff73c9ac..8701b9bd131 100644 --- a/tests/unittests/sources/test_nocloud.py +++ b/tests/unittests/sources/test_nocloud.py @@ -384,7 +384,7 @@ def test_parse_cmdline_data_valid(self): ), ) - for (fmt, expected) in pairs: + for fmt, expected in pairs: fill = {} cmdline = fmt % {"ds_id": ds_id} ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 380fe340890..154a7620759 100644 --- a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -99,7 +99,7 @@ def match_ec2_url(uri, headers): return (200, headers, ec2_files.get(path)) if path == "latest/meta-data/": buf = StringIO() - for (k, v) in ec2_meta.items(): + for k, v in ec2_meta.items(): if isinstance(v, (list, tuple)): buf.write("%s/" % (k)) else: diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py index f1625fbf9f2..2372ca5ecfc 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -1076,9 +1076,9 @@ def test_secondary_nic_addition( """ if 
configure_secondary_nics is not None: - oracle_ds.ds_cfg[ - "configure_secondary_nics" - ] = configure_secondary_nics + oracle_ds.ds_cfg["configure_secondary_nics"] = ( + configure_secondary_nics + ) oracle_ds._vnics_data = "DummyData" with mock.patch.object( diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index c1415934141..fd4bb481e46 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -241,45 +241,27 @@ def test_get_nics_list_dhcp(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) - # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1" ) subnets = nic1.get("subnets") - self.assertEqual(2, len(subnets), "number of subnets for NIC1") - subnet_ipv4 = subnets[0] - self.assertEqual( - "dhcp", subnet_ipv4.get("type"), "Ipv4 DHCP type for NIC1" - ) - self.assertEqual( - "auto", subnet_ipv4.get("control"), "NIC1 Control type" - ) - subnet_ipv6 = subnets[1] - self.assertEqual( - "dhcp6", subnet_ipv6.get("type"), "Ipv6 DHCP type for NIC1" - ) + self.assertEqual(1, len(subnets), "number of subnets for NIC1") + subnet = subnets[0] + self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1") + self.assertEqual("auto", subnet.get("control"), "NIC1 Control type") - # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2" ) subnets = nic2.get("subnets") - self.assertEqual(2, len(subnets), "number of subnets for NIC2") - subnet_ipv4 = subnets[0] - self.assertEqual( - "dhcp", subnet_ipv4.get("type"), "Ipv4 DHCP type for NIC2" - ) - self.assertEqual( - "auto", subnet_ipv4.get("control"), "NIC2 Control type" - ) - subnet_ipv6 = subnets[1] - self.assertEqual( - "dhcp6", subnet_ipv6.get("type"), "Ipv6 DHCP type for NIC2" - ) + self.assertEqual(1, len(subnets), "number of subnets for NIC2") + subnet = subnets[0] + self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2") + self.assertEqual("auto", subnet.get("control"), "NIC2 Control type") def test_get_nics_list_static(self): """Tests if NicConfigurator properly calculates network subnets @@ -304,7 +286,6 @@ def test_get_nics_list_static(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) - # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( @@ -364,7 +345,6 @@ def test_get_nics_list_static(self): else: self.assertEqual(True, False, "invalid gateway %s" % (gateway)) - # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( @@ -372,18 +352,16 @@ def test_get_nics_list_static(self): ) subnets = nic2.get("subnets") - self.assertEqual(2, len(subnets), "Number of subnets for NIC2") + self.assertEqual(1, len(subnets), "Number of subnets for NIC2") - subnet_ipv4 = subnets[0] - self.assertEqual("static", subnet_ipv4.get("type"), "Subnet type") + subnet = subnets[0] + self.assertEqual("static", subnet.get("type"), "Subnet type") self.assertEqual( - "192.168.6.102", subnet_ipv4.get("address"), "Subnet address" + "192.168.6.102", 
subnet.get("address"), "Subnet address" ) self.assertEqual( - "255.255.0.0", subnet_ipv4.get("netmask"), "Subnet netmask" + "255.255.0.0", subnet.get("netmask"), "Subnet netmask" ) - subnet_ipv6 = subnets[1] - self.assertEqual("dhcp6", subnet_ipv6.get("type"), "Subnet type") def test_custom_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -470,10 +448,7 @@ def test_non_primary_nic_without_gateway(self): "type": "static", "address": "10.20.87.154", "netmask": "255.255.252.0", - }, - { - "type": "dhcp6", - }, + } ], } ], @@ -524,10 +499,7 @@ def test_non_primary_nic_with_gateway(self): "metric": 10000, } ], - }, - { - "type": "dhcp6", - }, + } ], } ], @@ -587,10 +559,7 @@ def test_cust_non_primary_nic_with_gateway_(self): "metric": 10000, } ], - }, - { - "type": "dhcp6", - }, + } ], } ], @@ -635,10 +604,7 @@ def test_a_primary_nic_with_gateway(self): "address": "10.20.87.154", "netmask": "255.255.252.0", "gateway": "10.20.87.253", - }, - { - "type": "dhcp6", - }, + } ], } ], diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 14be6fa48e3..7621c5f6c80 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -362,9 +362,9 @@ def test_merging_cloud_config(self, tmpdir): - morestuff """ message2 = MIMEBase("text", "cloud-config") - message2[ - "X-Merge-Type" - ] = "dict(recurse_array,recurse_str)+list(append)+str(append)" + message2["X-Merge-Type"] = ( + "dict(recurse_array,recurse_str)+list(append)+str(append)" + ) message2.set_payload(blob2) blob3 = """ diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 81c878d2ee2..efb71618ce3 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -31,7 +31,7 @@ def _old_mergedict(src, cand): Nested dictionaries are merged recursively. 
""" if isinstance(src, dict) and isinstance(cand, dict): - for (k, v) in cand.items(): + for k, v in cand.items(): if k not in src: src[k] = v else: @@ -145,10 +145,10 @@ def test_merge_cc_samples(self): paths = c_helpers.Paths({}) cc_handler = cloud_config.CloudConfigPartHandler(paths) cc_handler.cloud_fn = None - for (payloads, (expected_merge, expected_fn)) in tests: + for payloads, (expected_merge, expected_fn) in tests: cc_handler.handle_part(None, CONTENT_START, None, None, None, None) merging_fns = [] - for (fn, contents) in payloads: + for fn, contents in payloads: cc_handler.handle_part( None, None, "%s.yaml" % (fn), contents, None, {} ) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index dbae4f20267..68e44fa8021 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -200,7 +200,7 @@ bond-miimon: 100 bond-mode: 802.3ad bond-updelay: 0 - bond-xmit-hash-policy: layer3+4 + bond-xmit_hash_policy: layer3+4 subnets: - address: 10.101.10.47/23 gateway: 10.101.11.254 @@ -254,7 +254,7 @@ bond-miimon: 100 bond-mode: 802.3ad bond-updelay: 0 - bond-xmit-hash-policy: layer3+4 + bond-xmit_hash_policy: layer3+4 subnets: - type: manual type: bond @@ -296,7 +296,7 @@ bond-miimon: 100 bond-mode: 802.3ad bond-updelay: 0 - bond-xmit-hash-policy: layer3+4 + bond-xmit_hash_policy: layer3+4 subnets: - address: 10.101.8.65/26 routes: @@ -3551,14 +3551,12 @@ def test_render( ) if network_cfg is None: - network_cfg = net.generate_fallback_config() + parsed_cfg = net.generate_fallback_config() else: - network_cfg = yaml.safe_load(network_cfg) - assert isinstance(network_cfg, dict) + parsed_cfg = yaml.safe_load(network_cfg) + assert isinstance(parsed_cfg, dict) - ns = network_state.parse_net_config_data( - network_cfg, skip_broken=False - ) + ns = network_state.parse_net_config_data(parsed_cfg, skip_broken=False) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index c856f97564f..190eca7610e 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2456,6 +2456,66 @@ def test_unicode_not_messed_up(self): self.assertNotIn("\x00", roundtripped) +class TestReadOptionalSeed: + @pytest.mark.parametrize( + "seed_dir,expected_fill,retval", + ( + ({}, {}, False), + ({"meta-data": "md"}, {}, False), + ( + {"meta-data": "md: val", "user-data": "ud"}, + { + "meta-data": {"md": "val"}, + "user-data": b"ud", + "network-config": None, + "vendor-data": None, + }, + True, + ), + ( + { + "meta-data": "md: val", + "user-data": "ud", + "network-config": "net: cfg", + }, + { + "meta-data": {"md": "val"}, + "user-data": b"ud", + "network-config": {"net": "cfg"}, + "vendor-data": None, + }, + True, + ), + ( + { + "meta-data": "md: val", + "user-data": "ud", + "vendor-data": "vd", + }, + { + "meta-data": {"md": "val"}, + "user-data": b"ud", + "network-config": None, + "vendor-data": b"vd", + }, + True, + ), + ), + ) + def test_read_optional_seed_sets_fill_on_success( + self, seed_dir, expected_fill, retval, tmpdir + ): + """Set fill dict values based on seed files present.""" + if seed_dir is not None: + helpers.populate_dir(tmpdir.strpath, seed_dir) + fill = {} + assert ( + util.read_optional_seed(fill, tmpdir.strpath + os.path.sep) + is retval + ) + assert fill == expected_fill + + class TestReadSeeded: def test_unicode_not_messed_up(self, tmpdir): ud = b"userdatablob" diff --git a/tools/mock-meta.py b/tools/mock-meta.py index a52536165f9..f8fcd776284 100755 --- 
a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -280,7 +280,7 @@ def get_data(self, params, who, **kwargs): return result else: contents = [] - for (i, key_id) in enumerate(key_ids): + for i, key_id in enumerate(key_ids): contents.append("%s=%s" % (i, key_id)) return "\n".join(contents) elif action == "placement": diff --git a/tox.ini b/tox.ini index d6982cbe382..be5e1d647d2 100644 --- a/tox.ini +++ b/tox.ini @@ -15,28 +15,47 @@ package = skip basepython = python3 setenv = LC_ALL = en_US.utf-8 -passenv= +passenv = PYTEST_ADDOPTS HYPOTHESIS_PROFILE +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt -[format_deps] -black==22.3.0 -hypothesis==6.31.6 -hypothesis_jsonschema==0.20.1 -isort==5.10.1 -mypy==0.950 -pylint==3.2.0 -pytest==7.0.1 -ruff==0.4.3 -types-jsonschema==4.4.2 -types-Jinja2==2.11.9 -types-oauthlib==3.1.6 -types-passlib==1.7.7.12 -types-PyYAML==6.0.4 -types-requests==2.27.8 -types-setuptools==57.4.9 -typing-extensions==4.1.1 +[types] +deps = + # each release of type stubs relates to a specific version of a library + # so leave these unpinned + types-jsonschema + types-Jinja2 + types-oauthlib + types-passlib + types-PyYAML + types-requests + types-setuptools + typing-extensions +[pinned_versions] +deps = + {[types]deps} + black==24.8.0 + hypothesis==6.111.0 + hypothesis_jsonschema==0.23.1 + isort==5.13.2 + mypy==1.11.1 + pylint==3.2.6 + ruff==0.5.7 + +[latest_versions] +deps = + {[types]deps} + black + hypothesis + hypothesis_jsonschema + isort + mypy + pylint + ruff [files] schema = cloudinit/config/schemas/schema-cloud-config-v1.json @@ -45,100 +64,54 @@ network_v1 = cloudinit/config/schemas/schema-network-config-v1.json network_v2 = cloudinit/config/schemas/schema-network-config-v2.json [testenv:ruff] -deps = - ruff=={[format_deps]ruff} +deps = {[pinned_versions]deps} commands = {envpython} -m ruff check {posargs:.} [testenv:pylint] -deps = - pylint=={[format_deps]pylint} - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/integration-requirements.txt +deps = {[pinned_versions]deps} commands = {envpython} -m pylint {posargs:.} [testenv:black] -deps = - black=={[format_deps]black} +deps = {[pinned_versions]deps} commands = {envpython} -m black --check {posargs:.} [testenv:isort] -deps = - isort=={[format_deps]isort} +deps = {[pinned_versions]deps} commands = {envpython} -m isort --check-only --diff {posargs:.} [testenv:mypy] deps = - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt - hypothesis=={[format_deps]hypothesis} - hypothesis_jsonschema=={[format_deps]hypothesis_jsonschema} - mypy=={[format_deps]mypy} - types-jsonschema=={[format_deps]types-jsonschema} - types-Jinja2=={[format_deps]types-Jinja2} - types-passlib=={[format_deps]types-passlib} - types-pyyaml=={[format_deps]types-PyYAML} - types-oauthlib=={[format_deps]types-oauthlib} - types-requests=={[format_deps]types-requests} - types-setuptools=={[format_deps]types-setuptools} - typing-extensions=={[format_deps]typing-extensions} + {[testenv]deps} + {[pinned_versions]deps} commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:check_format] deps = - black=={[format_deps]black} - ruff=={[format_deps]ruff} - hypothesis=={[format_deps]hypothesis} - hypothesis_jsonschema=={[format_deps]hypothesis_jsonschema} - isort=={[format_deps]isort} - mypy=={[format_deps]mypy} - pylint=={[format_deps]pylint} - types-jsonschema=={[format_deps]types-jsonschema} - types-Jinja2=={[format_deps]types-Jinja2} - 
types-oauthlib=={[format_deps]types-oauthlib} - types-passlib=={[format_deps]types-passlib} - types-pyyaml=={[format_deps]types-PyYAML} - types-oauthlib=={[format_deps]types-oauthlib} - types-requests=={[format_deps]types-requests} - types-setuptools=={[format_deps]types-setuptools} - typing-extensions=={[format_deps]typing-extensions} - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt + {[testenv]deps} + {[pinned_versions]deps} commands = - {[testenv:black]commands} - {[testenv:ruff]commands} - {[testenv:isort]commands} - {[testenv:mypy]commands} - {[testenv:pylint]commands} + {envpython} -m ruff check {posargs:.} + {envpython} -m pylint {posargs:.} + {envpython} -m black --check {posargs:.} + {envpython} -m isort --check-only --diff {posargs:.} + {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:check_format_tip] deps = - black - ruff - hypothesis - hypothesis_jsonschema - isort - mypy - pylint - types-jsonschema - types-Jinja2 - types-oauthlib - types-passlib - types-pyyaml - types-oauthlib - types-requests - types-setuptools - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt + {[testenv]deps} + {[latest_versions]deps} commands = - {[testenv:check_format]commands} + {envpython} -m ruff check {posargs:.} + {envpython} -m pylint {posargs:.} + {envpython} -m black --check {posargs:.} + {envpython} -m isort --check-only --diff {posargs:.} + {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:do_format] -deps = - black=={[format_deps]black} - isort=={[format_deps]isort} +deps = {[pinned_versions]deps} commands = {envpython} -m isort . {envpython} -m black . @@ -148,35 +121,26 @@ commands = {envpython} -m json.tool --indent 2 {[files]network_v2} {[files]network_v2} [testenv:do_format_tip] -deps = - black - isort +deps = {[latest_versions]deps} commands = - {[testenv:do_format]commands} + {envpython} -m isort . + {envpython} -m black . 
+    {envpython} -m json.tool --indent 2 {[files]schema} {[files]schema}
+    {envpython} -m json.tool --indent 2 {[files]version} {[files]version}
+    {envpython} -m json.tool --indent 2 {[files]network_v1} {[files]network_v1}
+    {envpython} -m json.tool --indent 2 {[files]network_v2} {[files]network_v2}

 [testenv:py3]
-deps =
-    -r{toxinidir}/test-requirements.txt
-commands = {envpython} -m pytest \
-    -vvvv --showlocals \
-    --durations 10 \
-    -m "not hypothesis_slow" \
-    --cov=cloudinit --cov-branch \
-    {posargs:tests/unittests}
+commands = {envpython} -m pytest -m "not hypothesis_slow" --cov=cloudinit --cov-branch {posargs:tests/unittests}

-# experimental
 [testenv:py3-fast]
 deps =
-    -r{toxinidir}/test-requirements.txt
+    {[testenv]deps}
     pytest-xdist
-commands = {envpython} -m pytest -n auto -m "not hypothesis_slow" -m "not serial"\
-    {posargs:tests/unittests}
+commands = {envpython} -m pytest -n auto -m "not hypothesis_slow and not serial" {posargs:tests/unittests}

 [testenv:hypothesis-slow]
-deps =
-    hypothesis==6.31.6
-    hypothesis_jsonschema==0.20.1
-    -r{toxinidir}/test-requirements.txt
+deps = {[pinned_versions]deps}
 commands = {envpython} -m pytest \
     -m hypothesis_slow \
     --hypothesis-show-statistics \
@@ -184,11 +148,7 @@ commands = {envpython} -m pytest \
 #commands = {envpython} -X tracemalloc=40 -Werror::ResourceWarning:cloudinit -m pytest \

 [testenv:py3-leak]
-deps = {[testenv:py3]deps}
-commands = {envpython} -X tracemalloc=40 -Wall -m pytest \
-    --durations 10 \
-    --cov=cloudinit --cov-branch \
-    {posargs:tests/unittests}
+commands = {envpython} -X tracemalloc=40 -Wall -m pytest {posargs:tests/unittests}


 [testenv:lowest-supported]
@@ -200,37 +160,34 @@ commands = {envpython} -X tracemalloc=40 -Wall -m pytest \
 # To obtain these versions, check the versions of these libraries
-# in the oldest support Ubuntu distro. Theses versions are from bionic.
+# in the oldest supported Ubuntu distro. These versions are from focal.
 deps =
-    jinja2==2.10
-    oauthlib==2.0.6
+    jinja2==2.10.1
+    oauthlib==3.1.0
     pyserial==3.4
     configobj==5.0.6
-    pyyaml==3.12
-    requests==2.18.4
-    jsonpatch==1.16
-    jsonschema==2.6.0
+    pyyaml==5.3.1
+    requests==2.22.0
+    jsonpatch==1.23
+    jsonschema==3.2.0
     # test-requirements
-    pytest==3.3.2
-    pytest-cov==2.5.1
-    pytest-mock==1.7.1
-    setuptools==44.0.0
-    # Needed by pytest and default causes failures
-    attrs==17.4.0
-    responses==0.5.1
+    pytest==4.6.9
+    pytest-cov==2.8.1
+    pytest-mock==1.10.4
+    setuptools==45.2.0
+    responses==0.9.0
     passlib
-commands = {[testenv:py3]commands}
+    # required for this version of jinja2
+    markupsafe==2.0.1
+commands = {envpython} -m pytest -m "not hypothesis_slow" --cov=cloudinit --cov-branch {posargs:tests/unittests}

 [testenv:doc]
-deps =
-    -r{toxinidir}/doc-requirements.txt
+deps = -r{toxinidir}/doc-requirements.txt
 commands =
     {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html}
-    doc8 doc/rtd
+    {envpython} -m doc8 doc/rtd

 [testenv:doc-spelling]
-deps =
-    -r{toxinidir}/doc-requirements.txt
-commands =
-    {envpython} -m sphinx -b spelling {posargs:-W doc/rtd doc/rtd_html}
+deps = -r{toxinidir}/doc-requirements.txt
+commands = {envpython} -m sphinx -b spelling {posargs:-W doc/rtd doc/rtd_html}

 # linkcheck shows false positives and has noisy output.
 # Despite these limitations, it is better than a manual search of the docs.
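Note on the tox.ini rework above: the per-environment pin lists are consolidated into shared [types], [pinned_versions], and [latest_versions] sections that each environment pulls in via tox's {[section]key} value substitution, so bumping a tool version now touches a single section rather than every lint env. Below is a minimal sketch of the same define-once, reference-many idea using Python's configparser with ExtendedInterpolation; it is an illustration only, since tox implements its own {[section]key} syntax while configparser spells the reference ${section:key}.

    # Sketch only: ExtendedInterpolation resolves ${section:option}
    # references, analogous to tox's {[section]key} substitution.
    from configparser import ConfigParser, ExtendedInterpolation
    from textwrap import dedent

    INI = dedent("""\
        [pinned_versions]
        deps =
            black==24.8.0
            mypy==1.11.1

        [testenv:black]
        deps = ${pinned_versions:deps}
    """)

    parser = ConfigParser(interpolation=ExtendedInterpolation())
    parser.read_string(INI)
    # Resolves to the pinned list defined once in [pinned_versions].
    print(parser["testenv:black"]["deps"])
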
@@ -240,61 +197,36 @@ commands = # # followed by manual verification of the links reported [testenv:linkcheck] -deps = - -r{toxinidir}/doc-requirements.txt +deps = -r{toxinidir}/doc-requirements.txt commands = {envpython} -m sphinx {posargs:-b linkcheck doc/rtd doc/rtd_html} [testenv:tip-ruff] -deps = ruff -commands = {[testenv:ruff]commands} +deps = {[latest_versions]deps} +commands = {envpython} -m ruff check {posargs:.} [testenv:tip-mypy] deps = - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt - hypothesis - hypothesis_jsonschema - mypy - pytest - types-Jinja2 - types-jsonschema - types-oauthlib - types-PyYAML - types-passlib - types-pyyaml - types-oauthlib - types-requests - types-setuptools - typing-extensions + {[testenv]deps} + {[latest_versions]deps} commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:tip-pylint] -deps = - # requirements - pylint - # test-requirements - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/integration-requirements.txt +deps = {[latest_versions]deps} commands = {envpython} -m pylint {posargs:.} - [testenv:tip-black] -deps = black -commands = {[testenv:black]commands} +deps = {[latest_versions]deps} +commands = {envpython} -m black --check {posargs:.} [testenv:tip-isort] -deps = isort -commands = {[testenv:isort]commands} +deps = {[latest_versions]deps} +commands = {envpython} -m isort --check-only --diff {posargs:.} [testenv:integration-tests] -commands = {envpython} -m pytest -vv \ - --log-cli-level=INFO \ - --durations 10 \ - {posargs:tests/integration_tests} -deps = - -r{toxinidir}/integration-requirements.txt +deps = -r{toxinidir}/integration-requirements.txt +commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} passenv = CLOUD_INIT_* PYCLOUDLIB_* @@ -302,22 +234,21 @@ passenv = OS_* [testenv:integration-tests-ci] -commands = {[testenv:integration-tests]commands} -deps = {[testenv:integration-tests]deps} +deps = -r{toxinidir}/integration-requirements.txt +commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* - TRAVIS setenv = PYTEST_ADDOPTS="-m ci and not adhoc" [testenv:integration-tests-jenkins] # Pytest's RC=1 means "Tests were collected and run but some of the tests failed". # Do not fail in this case, but let Jenkins handle it using the junit report. +deps = -r{toxinidir}/integration-requirements.txt allowlist_externals = sh -commands = sh -c "{envpython} -m pytest --log-cli-level=INFO -vv {posargs:tests/integration_tests/none} || [ $? -eq 1 ]" -deps = {[testenv:integration-tests]deps} +commands = sh -c "{envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests/none} || [ $? -eq 1 ]" passenv = *_proxy CLOUD_INIT_*
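
On the integration-tests-jenkins change: the guard sh -c "... || [ $? -eq 1 ]" leans on pytest's exit-code contract, where 0 means all tests passed, 1 means tests ran but some failed, and 2 through 5 signal an interrupted run, an internal error, a usage error, or an empty collection. The step therefore succeeds only on exit codes 0 and 1, letting Jenkins grade ordinary test failures from the junit report while real infrastructure problems still fail the build. A rough Python equivalent of the shell guard follows; it is a sketch only, as the tox env actually shells out through sh.

    # Sketch: tolerate pytest's rc=1 ("tests were run, some failed")
    # but propagate rc>=2 (interrupted run, internal error, usage
    # error, or no tests collected).
    import subprocess
    import sys

    result = subprocess.run(
        [sys.executable, "-m", "pytest", "--log-cli-level=INFO",
         "tests/integration_tests/none"]
    )
    sys.exit(0 if result.returncode in (0, 1) else result.returncode)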