ansible-playbook [core 2.17.7]
  config file = None
  configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/local/lib/python3.12/site-packages/ansible
  ansible collection location = /tmp/collections-PRc
  executable location = /usr/local/bin/ansible-playbook
  python version = 3.12.8 (main, Dec 3 2024, 00:00:00) [GCC 14.2.1 20241104 (Red Hat 14.2.1-6)] (/usr/bin/python3.12)
  jinja version = 3.1.4
  libyaml = True
No config file found; using defaults
running playbook inside collection fedora.linux_system_roles
redirecting (type: callback) ansible.builtin.debug to ansible.posix.debug
redirecting (type: callback) ansible.builtin.debug to ansible.posix.debug
redirecting (type: callback) ansible.builtin.profile_tasks to ansible.posix.profile_tasks
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.

PLAYBOOK: tests_quadlet_pod.yml ************************************************
2 plays in /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml

PLAY [all] *********************************************************************

TASK [Include vault variables] *************************************************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:5
Saturday 18 January 2025 11:33:25 -0500 (0:00:00.007) 0:00:00.007 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_test_password": {"__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n35383939616163653333633431363463313831383037386236646138333162396161356130303461\n3932623930643263313563336163316337643562333936360a363538636631313039343233383732\n38666530383538656639363465313230343533386130303833336434303438333161656262346562\n3362626538613031640a663330613638366132356534363534353239616666653466353961323533\n6565\n"}, "mysql_container_root_password": {"__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n61333932373230333539663035366431326163363166363036323963623131363530326231303634\n6635326161643165363366323062333334363730376631660a393566366139353861656364656661\n38653463363837336639363032646433666361646535366137303464623261313663643336306465\n6264663730656337310a343962353137386238383064646533366433333437303566656433386233\n34343235326665646661623131643335313236313131353661386338343366316261643634653633\n3832313034366536616531323963333234326461353130303532\n"}}, "ansible_included_var_files": ["/tmp/podman-JDA/tests/vars/vault-variables.yml"], "changed": false}

PLAY [Ensure that the role can manage quadlet pods] ****************************

TASK [Gathering Facts] *********************************************************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:9
Saturday 18 January 2025 11:33:25 -0500 (0:00:00.022) 0:00:00.030 ******
[WARNING]: Platform linux on host managed-node1 is using the discovered Python
interpreter at /usr/bin/python3.12, but future installation of another Python
interpreter could change the meaning of that path. See
https://docs.ansible.com/ansible-core/2.17/reference_appendices/interpreter_discovery.html
for more information.
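The "Include vault variables" task above reads two vault-encrypted values from /tmp/podman-JDA/tests/vars/vault-variables.yml. The test source itself is not part of this log; the following is a minimal sketch (file layout assumed, not the actual test source) of how such a task is typically written:

    # Hedged sketch -- the real task at tests_quadlet_pod.yml:5 may differ
    - name: Include vault variables
      ansible.builtin.include_vars:
        file: vars/vault-variables.yml   # values are "!vault |" encrypted strings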
ok: [managed-node1]

TASK [Run the role - root] *****************************************************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:34
Saturday 18 January 2025 11:33:26 -0500 (0:00:01.413) 0:00:01.443 ******
included: fedora.linux_system_roles.podman for managed-node1

TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3
Saturday 18 January 2025 11:33:26 -0500 (0:00:00.071) 0:00:01.514 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] ****
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3
Saturday 18 January 2025 11:33:26 -0500 (0:00:00.023) 0:00:01.538 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Check if system is ostree] ************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11
Saturday 18 January 2025 11:33:26 -0500 (0:00:00.035) 0:00:01.574 ******
ok: [managed-node1] => {"changed": false, "stat": {"exists": false}}

TASK [fedora.linux_system_roles.podman : Set flag to indicate system is ostree] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16
Saturday 18 January 2025 11:33:27 -0500 (0:00:00.534) 0:00:02.108 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_is_ostree": false}, "changed": false}

TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23
Saturday 18 January 2025 11:33:27 -0500 (0:00:00.026) 0:00:02.135 ******
ok: [managed-node1] => {"changed": false, "stat": {"exists": false}}

TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28
Saturday 18 January 2025 11:33:27 -0500 (0:00:00.407) 0:00:02.542 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_is_transactional": false}, "changed": false}

TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32
Saturday 18 January 2025 11:33:27 -0500 (0:00:00.023) 0:00:02.566 ******
ok: [managed-node1] => (item=RedHat.yml) => {"ansible_facts": {"__podman_packages": ["podman", "shadow-utils-subid"]}, "ansible_included_var_files": ["/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml"], "ansible_loop_var": "item", "changed": false, "item": "RedHat.yml"}
skipping: [managed-node1] => (item=CentOS.yml) => {"ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS.yml", "skip_reason": "Conditional result was False"}
"podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml" ], "ansible_loop_var": "item", "changed": false, "item": "CentOS_10.yml" } ok: [managed-node1] => (item=CentOS_10.yml) => { "ansible_facts": { "__podman_packages": [ "iptables-nft", "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml" ], "ansible_loop_var": "item", "changed": false, "item": "CentOS_10.yml" } TASK [fedora.linux_system_roles.podman : Gather the package facts] ************* task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6 Saturday 18 January 2025 11:33:27 -0500 (0:00:00.049) 0:00:02.615 ****** ok: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Enable copr if requested] ************* task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10 Saturday 18 January 2025 11:33:29 -0500 (0:00:01.300) 0:00:03.916 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_use_copr | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14 Saturday 18 January 2025 11:33:29 -0500 (0:00:00.074) 0:00:03.990 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "(__podman_packages | difference(ansible_facts.packages))", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28 Saturday 18 January 2025 11:33:29 -0500 (0:00:00.082) 0:00:04.073 ****** skipping: [managed-node1] => { "false_condition": "__podman_is_transactional | d(false)" } TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33 Saturday 18 January 2025 11:33:29 -0500 (0:00:00.095) 0:00:04.168 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38 Saturday 18 January 2025 11:33:29 -0500 (0:00:00.072) 0:00:04.241 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get podman version] ******************* task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46 Saturday 18 January 2025 11:33:29 -0500 (0:00:00.060) 0:00:04.302 ****** ok: [managed-node1] => { "changed": false, "cmd": [ "podman", "--version" ], "delta": "0:00:00.024864", "end": "2025-01-18 11:33:29.819433", "rc": 0, "start": "2025-01-18 11:33:29.794569" } STDOUT: podman version 5.3.1 
TASK [fedora.linux_system_roles.podman : Set podman version] *******************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52
Saturday 18 January 2025 11:33:29 -0500 (0:00:00.519) 0:00:04.821 ******
ok: [managed-node1] => {"ansible_facts": {"podman_version": "5.3.1"}, "changed": false}

TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56
Saturday 18 January 2025 11:33:29 -0500 (0:00:00.055) 0:00:04.877 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_version is version(\"4.2\", \"<\")", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.048) 0:00:04.925 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_version is version(\"4.4\", \"<\")", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.081) 0:00:05.007 ******
META: end_host conditional evaluated to False, continuing execution for managed-node1
skipping: [managed-node1] => {"skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1"}
MSG: end_host conditional evaluated to false, continuing execution for managed-node1

TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.090) 0:00:05.097 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_version is version(\"5.0\", \"<\")", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.073) 0:00:05.170 ******
META: end_host conditional evaluated to False, continuing execution for managed-node1
skipping: [managed-node1] => {"skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1"}
MSG: end_host conditional evaluated to false, continuing execution for managed-node1
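All of the version-gate tasks above compare podman_version (5.3.1 in this run) against a feature floor, so every check is skipped; on an older podman the matching task would fail or end the host instead. A hedged sketch of that pattern, not the role's literal source:

    - name: Podman package version must be 5.0 or later for Pod quadlets
      ansible.builtin.fail:
        msg: "podman {{ podman_version }} is too old; Pod quadlets need 5.0+"
      when: podman_version is version("5.0", "<")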
TASK [fedora.linux_system_roles.podman : Check user and group information] *****
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.051) 0:00:05.222 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Get user information] *****************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.098) 0:00:05.321 ******
ok: [managed-node1] => {"ansible_facts": {"getent_passwd": {"root": ["x", "0", "0", "Super User", "/root", "/bin/bash"]}}, "changed": false}

TASK [fedora.linux_system_roles.podman : Fail if user does not exist] **********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.492) 0:00:05.814 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Set group for podman user] ************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16
Saturday 18 January 2025 11:33:30 -0500 (0:00:00.056) 0:00:05.871 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_group": "0"}, "changed": false}

TASK [fedora.linux_system_roles.podman : See if getsubids exists] **************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.065) 0:00:05.937 ******
ok: [managed-node1] => {"changed": false, "stat": {"atime": 1737217679.5789073, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "89ab10a2a8fa81bcc0c1df0058f200469ce46f97", "ctime": 1737217674.8858979, "dev": 51714, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 9160785, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1730678400.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15744, "uid": 0, "version": "1643853349", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true}}

TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.453) 0:00:06.390 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.052) 0:00:06.443 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ******
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.055) 0:00:06.498 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False"}
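Every getsubids/subid check above short-circuits because __podman_user is root (rootful podman needs no subordinate IDs). For a non-root user the role would query them with getsubids; a sketch of that pattern, not the role's literal task:

    - name: Check with getsubids for user subuids
      ansible.builtin.command: getsubids {{ __podman_user }}
      changed_when: false
      when: __podman_user not in ["root", "0"]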
TASK [fedora.linux_system_roles.podman : Get subuid file] **********************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.067) 0:00:06.566 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Get subgid file] **********************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.052) 0:00:06.619 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ******
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.052) 0:00:06.671 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ******
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.049) 0:00:06.721 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ******
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.046) 0:00:06.767 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Set config file paths] ****************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:115
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.048) 0:00:06.815 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_container_conf_file": "/etc/containers/containers.conf.d/50-systemroles.conf", "__podman_policy_json_file": "/etc/containers/policy.json", "__podman_registries_conf_file": "/etc/containers/registries.conf.d/50-systemroles.conf", "__podman_storage_conf_file": "/etc/containers/storage.conf"}, "changed": false}

TASK [fedora.linux_system_roles.podman : Handle container.conf.d] **************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:124
Saturday 18 January 2025 11:33:31 -0500 (0:00:00.083) 0:00:06.898 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Ensure containers.d exists] ***********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:5
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.105) 0:00:07.004 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False"}
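The containers.conf.d tasks here (and the "Update container config file" task that follows) are skipped because podman_containers_conf is empty. When set, the role renders the dict into the drop-in path computed above, /etc/containers/containers.conf.d/50-systemroles.conf. A sketch with an assumed, illustrative value:

    podman_containers_conf:
      containers:
        log_size_max: 10485760   # any containers.conf setting; this value is illustrative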
TASK [fedora.linux_system_roles.podman : Update container config file] *********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:13
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.050) 0:00:07.054 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Handle registries.conf.d] *************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:127
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.061) 0:00:07.116 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Ensure registries.d exists] ***********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:5
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.110) 0:00:07.226 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Update registries config file] ********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:13
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.086) 0:00:07.313 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Handle storage.conf] ******************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:130
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.047) 0:00:07.361 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Ensure storage.conf parent dir exists] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:5
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.108) 0:00:07.469 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Update storage config file] ***********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:13
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.055) 0:00:07.525 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Handle policy.json] *******************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:133
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.079) 0:00:07.604 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml for managed-node1
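The registries.conf.d and storage.conf handlers are likewise no-ops here because podman_registries_conf and podman_storage_conf are empty. Illustrative (assumed) values showing the general shape these dicts take before being rendered into the files computed in "Set config file paths":

    podman_registries_conf:
      unqualified-search-registries:
        - registry.access.redhat.com   # example registries.conf entry
    podman_storage_conf:
      storage:
        runroot: /run/containers/storage   # example storage.conf entry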
TASK [fedora.linux_system_roles.podman : Ensure policy.json parent dir exists] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:6
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.115) 0:00:07.719 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Stat the policy.json file] ************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:14
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.067) 0:00:07.787 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Get the existing policy.json] *********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:19
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.046) 0:00:07.834 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Write new policy.json file] ***********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:25
Saturday 18 January 2025 11:33:32 -0500 (0:00:00.069) 0:00:07.904 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False"}

TASK [Manage firewall for specified ports] *************************************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:139
Saturday 18 January 2025 11:33:33 -0500 (0:00:00.060) 0:00:07.964 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_firewall | length > 0", "skip_reason": "Conditional result was False"}

TASK [Manage selinux for specified ports] **************************************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:146
Saturday 18 January 2025 11:33:33 -0500 (0:00:00.059) 0:00:08.023 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "podman_selinux_ports | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Keep track of users that need to cancel linger] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:153
Saturday 18 January 2025 11:33:33 -0500 (0:00:00.059) 0:00:08.083 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_cancel_user_linger": []}, "changed": false}

TASK [fedora.linux_system_roles.podman : Handle certs.d files - present] *******
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:157
Saturday 18 January 2025 11:33:33 -0500 (0:00:00.051) 0:00:08.134 ******
skipping: [managed-node1] => {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false}
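podman_firewall and podman_selinux_ports are empty, so the firewall and selinux hand-offs above are skipped. When used, they take the same shape as the corresponding system roles' variables; an assumed example, not taken from this run:

    podman_firewall:
      - port: 8080/tcp
        state: enabled
    podman_selinux_ports:
      - ports: "8080"
        protocol: tcp
        setype: http_port_t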
"the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle secrets] *********************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:175 Saturday 18 January 2025 11:33:33 -0500 (0:00:00.042) 0:00:08.225 ****** skipping: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Kubernetes specifications] ***** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:182 Saturday 18 January 2025 11:33:33 -0500 (0:00:00.044) 0:00:08.269 ****** skipping: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Quadlet specifications] ******** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:189 Saturday 18 January 2025 11:33:33 -0500 (0:00:00.043) 0:00:08.313 ****** included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node1 => (item=(censored due to no_log)) included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node1 => (item=(censored due to no_log)) TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Saturday 18 January 2025 11:33:33 -0500 (0:00:00.148) 0:00:08.461 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Pod": { "PodName": "quadlet-pod" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Saturday 18 January 2025 11:33:33 -0500 (0:00:00.122) 0:00:08.584 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "created", "__podman_systemd_unit_scope": "", "__podman_user": "root" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Saturday 18 January 2025 11:33:33 -0500 (0:00:00.112) 0:00:08.697 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Saturday 18 January 2025 11:33:33 -0500 (0:00:00.094) 0:00:08.792 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-pod", "__podman_quadlet_type": "pod", "__podman_rootless": false }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: 
TASK [fedora.linux_system_roles.podman : Check user and group information] *****
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57
Saturday 18 January 2025 11:33:34 -0500 (0:00:00.139) 0:00:08.932 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Get user information] *****************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2
Saturday 18 January 2025 11:33:34 -0500 (0:00:00.178) 0:00:09.110 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Fail if user does not exist] **********
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9
Saturday 18 January 2025 11:33:34 -0500 (0:00:00.095) 0:00:09.206 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Set group for podman user] ************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16
Saturday 18 January 2025 11:33:34 -0500 (0:00:00.073) 0:00:09.279 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_group": "0"}, "changed": false}

TASK [fedora.linux_system_roles.podman : See if getsubids exists] **************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31
Saturday 18 January 2025 11:33:34 -0500 (0:00:00.073) 0:00:09.353 ******
ok: [managed-node1] => {"changed": false, "stat": {"atime": 1737217679.5789073, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "89ab10a2a8fa81bcc0c1df0058f200469ce46f97", "ctime": 1737217674.8858979, "dev": 51714, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 9160785, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1730678400.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15744, "uid": 0, "version": "1643853349", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true}}

TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42
Saturday 18 January 2025 11:33:34 -0500 (0:00:00.416) 0:00:09.770 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False"}
"false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Saturday 18 January 2025 11:33:34 -0500 (0:00:00.051) 0:00:09.867 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Saturday 18 January 2025 11:33:35 -0500 (0:00:00.052) 0:00:09.919 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Saturday 18 January 2025 11:33:35 -0500 (0:00:00.048) 0:00:09.967 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Saturday 18 January 2025 11:33:35 -0500 (0:00:00.053) 0:00:10.021 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Saturday 18 January 2025 11:33:35 -0500 (0:00:00.053) 0:00:10.075 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Saturday 18 January 2025 11:33:35 -0500 (0:00:00.079) 0:00:10.154 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Saturday 18 January 2025 11:33:35 -0500 (0:00:00.045) 0:00:10.200 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [], "__podman_kube_yamls_raw": "", "__podman_service_name": "quadlet-pod-pod-pod.service", "__podman_systemd_scope": "system", "__podman_user_home_dir": "/root", "__podman_xdg_runtime_dir": "/run/user/0" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Saturday 18 
TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73
Saturday 18 January 2025 11:33:35 -0500 (0:00:00.070) 0:00:10.271 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_quadlet_path": "/etc/containers/systemd"}, "changed": false}

TASK [fedora.linux_system_roles.podman : Get kube yaml contents] ***************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77
Saturday 18 January 2025 11:33:35 -0500 (0:00:00.041) 0:00:10.313 ******
skipping: [managed-node1] => {"changed": false, "false_condition": "__podman_kube_yamls_raw | length > 0", "skip_reason": "Conditional result was False"}

TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:87
Saturday 18 January 2025 11:33:35 -0500 (0:00:00.032) 0:00:10.345 ******
ok: [managed-node1] => {"ansible_facts": {"__podman_images": [], "__podman_quadlet_file": "/etc/containers/systemd/quadlet-pod-pod.pod", "__podman_volumes": []}, "changed": false}
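"Part 5" joins the quadlet directory ("part 4"), name, and type into the target file path. Restated as a sketch, not the role's literal task:

    - ansible.builtin.set_fact:
        # => /etc/containers/systemd/quadlet-pod-pod.pod
        __podman_quadlet_file: "{{ __podman_quadlet_path }}/{{ __podman_quadlet_name }}.{{ __podman_quadlet_type }}"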
"__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Mark user for possible linger cancel] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:22 Saturday 18 January 2025 11:33:35 -0500 (0:00:00.079) 0:00:10.857 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create host directories] ************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:7 Saturday 18 January 2025 11:33:36 -0500 (0:00:00.056) 0:00:10.913 ****** skipping: [managed-node1] => { "changed": false, "skipped_reason": "No items in the list" } TASK [fedora.linux_system_roles.podman : Ensure container images are present] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18 Saturday 18 January 2025 11:33:36 -0500 (0:00:00.063) 0:00:10.976 ****** skipping: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Ensure the quadlet directory is present] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39 Saturday 18 January 2025 11:33:36 -0500 (0:00:00.055) 0:00:11.031 ****** ok: [managed-node1] => { "changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/etc/containers/systemd", "secontext": "system_u:object_r:etc_t:s0", "size": 6, "state": "directory", "uid": 0 } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is copied] ******** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:48 Saturday 18 January 2025 11:33:36 -0500 (0:00:00.669) 0:00:11.700 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_quadlet_file_src | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file content is present] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:58 Saturday 18 January 2025 11:33:36 -0500 (0:00:00.050) 0:00:11.751 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_quadlet_str | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is present] ******* task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:70 Saturday 18 January 2025 11:33:36 -0500 (0:00:00.045) 0:00:11.796 ****** changed: [managed-node1] => { "changed": true, "checksum": "1884c880482430d8bf2e944b003734fb8b7a462d", "dest": "/etc/containers/systemd/quadlet-pod-pod.pod", "gid": 0, "group": "root", "md5sum": "43c9e9c2ff3ad9cd27c1f2d12f03aee0", "mode": "0644", "owner": "root", "secontext": "system_u:object_r:etc_t:s0", "size": 70, "src": "/root/.ansible/tmp/ansible-tmp-1737218016.9384396-19502-222946066402839/.source.pod", "state": "file", "uid": 0 } TASK [fedora.linux_system_roles.podman : Reload systemctl] ********************* task path: 
TASK [fedora.linux_system_roles.podman : Reload systemctl] *********************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:82
Saturday 18 January 2025 11:33:37 -0500 (0:00:01.028) 0:00:12.825 ******
ok: [managed-node1] => {"changed": false, "name": null, "status": {}}

TASK [fedora.linux_system_roles.podman : Start service] ************************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:110
Saturday 18 January 2025 11:33:39 -0500 (0:00:01.261) 0:00:14.087 ******
changed: [managed-node1] => {"changed": true, "name": "quadlet-pod-pod-pod.service", "state": "started", "status": {"AccessSELinuxContext": "system_u:object_r:systemd_unit_file_t:s0", "ActiveEnterTimestampMonotonic": "0", "ActiveExitTimestampMonotonic": "0", "ActiveState": "inactive", "After": "-.mount systemd-journald.socket network-online.target sysinit.target system.slice basic.target", "AllowIsolate": "no", "AssertResult": "no", "AssertTimestampMonotonic": "0", "Before": "shutdown.target", "BindLogSockets": "no", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "yes", "CPUAffinityFromNUMA": "no", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "[not set]", "CPUUsageNSec": "[not set]", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanFreeze": "yes", "CanIsolate": "no", "CanLiveMount": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read cap_perfmon cap_bpf cap_checkpoint_restore", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "no", "ConditionTimestampMonotonic": "0", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroupId": "0", "ControlPID": "0", "CoredumpFilter": "0x33", "CoredumpReceive": "no", "DebugInvocation": "no", "DefaultDependencies": "yes", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "DefaultStartupMemoryLow": "0", "Delegate": "no", "Description": "quadlet-pod-pod-pod.service", "DevicePolicy": "auto", "DynamicUser": "no", "EffectiveMemoryHigh": "3698229248", "EffectiveMemoryMax": "3698229248", "EffectiveTasksMax": "22365", "Environment": "PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainHandoffTimestampMonotonic": "0", "ExecMainPID": "0", "ExecMainStartTimestampMonotonic": "0", "ExecMainStatus": "0", "ExecStart": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod start --pod-id-file=/run/quadlet-pod-pod-pod.pod-id ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod start --pod-id-file=/run/quadlet-pod-pod-pod.pod-id ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartPre": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod
create --infra-conmon-pidfile=/run/quadlet-pod-pod-pod.pid --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartPreEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod create --infra-conmon-pidfile=/run/quadlet-pod-pod-pod.pid --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStop": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod stop --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --time=10 ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod stop --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --time=10 ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPost": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod rm --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --force ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPostEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod rm --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --force ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExitType": "main", "ExtensionImagePolicy": "root=verity+signed+encrypted+unprotected+absent:usr=verity+signed+encrypted+unprotected+absent:home=encrypted+unprotected+absent:srv=encrypted+unprotected+absent:tmp=encrypted+unprotected+absent:var=encrypted+unprotected+absent", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FileDescriptorStorePreserve": "restart", "FinalKillSignal": "9", "FragmentPath": "/run/systemd/generator/quadlet-pod-pod-pod.service", "FreezerState": "running", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": "[not set]", "IOReadOperations": "[not set]", "IOSchedulingClass": "2", "IOSchedulingPriority": "4", "IOWeight": "[not set]", "IOWriteBytes": "[not set]", "IOWriteOperations": "[not set]", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "quadlet-pod-pod-pod.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestampMonotonic": "0", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "private", "KillMode": "control-group", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": "infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "8388608", "LimitMEMLOCKSoft": "8388608", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "524288", "LimitNOFILESoft": "1024", "LimitNPROC": "13978", "LimitNPROCSoft": "13978", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "13978", "LimitSIGPENDINGSoft": 
"13978", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LiveMountResult": "success", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "0", "ManagedOOMMemoryPressure": "auto", "ManagedOOMMemoryPressureDurationUSec": "[not set]", "ManagedOOMMemoryPressureLimit": "0", "ManagedOOMPreference": "none", "ManagedOOMSwap": "auto", "MemoryAccounting": "yes", "MemoryAvailable": "3288047616", "MemoryCurrent": "[not set]", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryKSM": "no", "MemoryLimit": "infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemoryPeak": "[not set]", "MemoryPressureThresholdUSec": "200ms", "MemoryPressureWatch": "auto", "MemorySwapCurrent": "[not set]", "MemorySwapMax": "infinity", "MemorySwapPeak": "[not set]", "MemoryZSwapCurrent": "[not set]", "MemoryZSwapMax": "infinity", "MemoryZSwapWriteback": "yes", "MountAPIVFS": "no", "MountImagePolicy": "root=verity+signed+encrypted+unprotected+absent:usr=verity+signed+encrypted+unprotected+absent:home=encrypted+unprotected+absent:srv=encrypted+unprotected+absent:tmp=encrypted+unprotected+absent:var=encrypted+unprotected+absent", "NFileDescriptorStore": "0", "NRestarts": "0", "NUMAPolicy": "n/a", "Names": "quadlet-pod-pod-pod.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "none", "OOMPolicy": "stop", "OOMScoreAdjust": "0", "OnFailureJobMode": "replace", "OnSuccessJobMode": "fail", "PIDFile": "/run/quadlet-pod-pod-pod.pid", "Perpetual": "no", "PrivateDevices": "no", "PrivateIPC": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivatePIDs": "no", "PrivateTmp": "no", "PrivateTmpEx": "no", "PrivateUsers": "no", "PrivateUsersEx": "no", "ProcSubset": "all", "ProtectClock": "no", "ProtectControlGroups": "no", "ProtectControlGroupsEx": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelLogs": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectProc": "default", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "ReloadSignal": "1", "RemainAfterExit": "no", "RemoveIPC": "no", "Requires": "-.mount sysinit.target system.slice", "RequiresMountsFor": "/run/containers", "Restart": "on-failure", "RestartKillSignal": "15", "RestartMaxDelayUSec": "infinity", "RestartMode": "normal", "RestartSteps": "0", "RestartUSec": "100ms", "RestartUSecNext": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "success", "RootDirectoryStartOnly": "no", "RootEphemeral": "no", "RootImagePolicy": "root=verity+signed+encrypted+unprotected+absent:usr=verity+signed+encrypted+unprotected+absent:home=encrypted+unprotected+absent:srv=encrypted+unprotected+absent:tmp=encrypted+unprotected+absent:var=encrypted+unprotected+absent", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "RuntimeRandomizedExtraUSec": "0", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "SetLoginEnvironment": "no", "Slice": "system.slice", "SourcePath": "/etc/containers/systemd/quadlet-pod-pod.pod", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": 
"[not set]", "StartupIOWeight": "[not set]", "StartupMemoryHigh": "infinity", "StartupMemoryLow": "0", "StartupMemoryMax": "infinity", "StartupMemorySwapMax": "infinity", "StartupMemoryZSwapMax": "infinity", "StateChangeTimestampMonotonic": "0", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "dead", "SuccessAction": "none", "SurviveFinalKillSignal": "no", "SyslogFacility": "3", "SyslogIdentifier": "quadlet-pod-pod-pod", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "SystemCallErrorNumber": "2147483646", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "[not set]", "TasksMax": "22365", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartFailureMode": "terminate", "TimeoutStartUSec": "1min 30s", "TimeoutStopFailureMode": "terminate", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "forking", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "generated", "UtmpMode": "init", "Wants": "network-online.target", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "infinity" } } TASK [fedora.linux_system_roles.podman : Restart service] ********************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:125 Saturday 18 January 2025 11:33:40 -0500 (0:00:01.270) 0:00:15.357 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_service_started is changed", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Saturday 18 January 2025 11:33:40 -0500 (0:00:00.053) 0:00:15.411 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Container": { "ContainerName": "quadlet-pod-container", "Exec": "/bin/busybox-extras httpd -f -p 80", "Image": "quay.io/libpod/testimage:20210610", "Pod": "quadlet-pod-pod.pod" }, "Install": { "WantedBy": "default.target" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Saturday 18 January 2025 11:33:40 -0500 (0:00:00.089) 0:00:15.500 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "created", "__podman_systemd_unit_scope": "", "__podman_user": "root" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Saturday 18 January 2025 11:33:40 -0500 (0:00:00.061) 0:00:15.561 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Saturday 18 January 2025 11:33:40 -0500 
(0:00:00.049) 0:00:15.611 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-container", "__podman_quadlet_type": "container", "__podman_rootless": false }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57 Saturday 18 January 2025 11:33:40 -0500 (0:00:00.114) 0:00:15.726 ****** included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Saturday 18 January 2025 11:33:40 -0500 (0:00:00.106) 0:00:15.833 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Saturday 18 January 2025 11:33:40 -0500 (0:00:00.055) 0:00:15.888 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.070) 0:00:15.959 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_group": "0" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.089) 0:00:16.048 ****** ok: [managed-node1] => { "changed": false, "stat": { "atime": 1737217679.5789073, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "89ab10a2a8fa81bcc0c1df0058f200469ce46f97", "ctime": 1737217674.8858979, "dev": 51714, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 9160785, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1730678400.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15744, "uid": 0, "version": "1643853349", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.442) 0:00:16.490 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK 
[fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.046) 0:00:16.537 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.051) 0:00:16.588 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.049) 0:00:16.638 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.054) 0:00:16.692 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.051) 0:00:16.744 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.077) 0:00:16.822 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Saturday 18 January 2025 11:33:41 -0500 (0:00:00.053) 0:00:16.875 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.069) 0:00:16.944 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [ "quay.io/libpod/testimage:20210610" ], "__podman_kube_yamls_raw": "", "__podman_service_name": 
"quadlet-pod-container.service", "__podman_systemd_scope": "system", "__podman_user_home_dir": "/root", "__podman_xdg_runtime_dir": "/run/user/0" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.093) 0:00:17.038 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_quadlet_path": "/etc/containers/systemd" }, "changed": false } TASK [fedora.linux_system_roles.podman : Get kube yaml contents] *************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.068) 0:00:17.107 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_kube_yamls_raw | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:87 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.048) 0:00:17.155 ****** ok: [managed-node1] => { "ansible_facts": { "__podman_images": [ "quay.io/libpod/testimage:20210610" ], "__podman_quadlet_file": "/etc/containers/systemd/quadlet-pod-container.container", "__podman_volumes": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:105 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.120) 0:00:17.276 ****** ok: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Cleanup quadlets] ********************* task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:112 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.069) 0:00:17.346 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_state == \"absent\"", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create and update quadlets] *********** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:116 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.061) 0:00:17.407 ****** included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Manage linger] ************************ task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:2 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.190) 0:00:17.597 ****** included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Enable linger if needed] ************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:12 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.102) 0:00:17.700 ****** skipping: [managed-node1] => { "changed": false, "false_condition": 
"__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Mark user as not yet needing to cancel linger] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:18 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.045) 0:00:17.745 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Mark user for possible linger cancel] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:22 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.047) 0:00:17.793 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create host directories] ************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:7 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.064) 0:00:17.857 ****** skipping: [managed-node1] => { "changed": false, "skipped_reason": "No items in the list" } TASK [fedora.linux_system_roles.podman : Ensure container images are present] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18 Saturday 18 January 2025 11:33:42 -0500 (0:00:00.043) 0:00:17.901 ****** changed: [managed-node1] => (item=None) => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": true } changed: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": true } TASK [fedora.linux_system_roles.podman : Ensure the quadlet directory is present] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39 Saturday 18 January 2025 11:33:47 -0500 (0:00:04.125) 0:00:22.027 ****** ok: [managed-node1] => { "changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/etc/containers/systemd", "secontext": "system_u:object_r:etc_t:s0", "size": 33, "state": "directory", "uid": 0 } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is copied] ******** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:48 Saturday 18 January 2025 11:33:47 -0500 (0:00:00.407) 0:00:22.434 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_quadlet_file_src | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file content is present] *** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:58 Saturday 18 January 2025 11:33:47 -0500 (0:00:00.031) 0:00:22.465 ****** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_quadlet_str | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is present] ******* task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:70 Saturday 18 January 2025 
11:33:47 -0500 (0:00:00.028) 0:00:22.493 ****** changed: [managed-node1] => { "changed": true, "checksum": "f0b5c8159fc3c65bf9310a371751609e4c1ba4c3", "dest": "/etc/containers/systemd/quadlet-pod-container.container", "gid": 0, "group": "root", "md5sum": "daaf6e904ff3c17edeb801084cfe256f", "mode": "0644", "owner": "root", "secontext": "system_u:object_r:etc_t:s0", "size": 230, "src": "/root/.ansible/tmp/ansible-tmp-1737218027.6281447-19906-14489979928832/.source.container", "state": "file", "uid": 0 } TASK [fedora.linux_system_roles.podman : Reload systemctl] ********************* task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:82 Saturday 18 January 2025 11:33:48 -0500 (0:00:00.813) 0:00:23.306 ****** ok: [managed-node1] => { "changed": false, "name": null, "status": {} } TASK [fedora.linux_system_roles.podman : Start service] ************************ task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:110 Saturday 18 January 2025 11:33:49 -0500 (0:00:00.780) 0:00:24.087 ****** fatal: [managed-node1]: FAILED! => { "changed": false } MSG: Unable to start service quadlet-pod-container.service: A dependency job for quadlet-pod-container.service failed. See 'journalctl -xe' for details.
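The error above is the crux of this test run: quadlet-pod-container.service itself never ran because a unit it depends on, quadlet-pod-pod-pod.service, failed first (the failed pod unit shows up in the systemctl list-units output below). A minimal diagnosis sketch for managed-node1, using only standard systemd tooling and the unit names taken from this log:

# Show why the pod unit - the failed dependency - did not start
systemctl status quadlet-pod-pod-pod.service --no-pager -l
journalctl -b -u quadlet-pod-pod-pod.service --no-pager

# Confirm the dependency chain from the container unit back to the pod unit
systemctl list-dependencies quadlet-pod-container.service --no-pager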
TASK [Debug3] ****************************************************************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:127 Saturday 18 January 2025 11:33:49 -0500 (0:00:00.578) 0:00:24.665 ****** ok: [managed-node1] => { "changed": false, "cmd": "set -x\nset -o pipefail\nexec 1>&2\n#podman volume rm --all\n#podman network prune -f\npodman volume ls\npodman network ls\npodman secret ls\npodman container ls\npodman pod ls\npodman images\nsystemctl list-units | grep quadlet\nsystemctl list-unit-files | grep quadlet\nls -alrtF /etc/containers/systemd\n/usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log\n", "delta": "0:00:00.619478", "end": "2025-01-18 11:33:50.701670", "rc": 0, "start": "2025-01-18 11:33:50.082192" } STDERR:
+ set -o pipefail
+ exec
+ podman volume ls
DRIVER VOLUME NAME
local 5b91e845725b9a38c4db654f5a648d0520f98a70531e23c7c4ebc3dd02be25a3
+ podman network ls
NETWORK ID NAME DRIVER
2f259bab93aa podman bridge
+ podman secret ls
ID NAME DRIVER CREATED UPDATED
+ podman container ls
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ podman pod ls
POD ID NAME STATUS CREATED INFRA ID # OF CONTAINERS
+ podman images
REPOSITORY TAG IMAGE ID CREATED SIZE
localhost/podman-pause 5.3.1-1733097600 19fab7a77b44 11 seconds ago 701 kB
quay.io/libpod/registry 2.8.2 0030ba3d620c 17 months ago 24.6 MB
quay.io/libpod/testimage 20210610 9f9ec7f2fdef 3 years ago 7.99 MB
+ systemctl list-units
+ grep quadlet
● quadlet-pod-pod-pod.service loaded failed failed quadlet-pod-pod-pod.service
+ systemctl list-unit-files
+ grep quadlet
quadlet-pod-container.service generated -
quadlet-pod-pod-pod.service generated -
+ ls -alrtF /etc/containers/systemd
total 8
drwxr-xr-x. 8 root root 150 Jan 18 11:28 ../
-rw-r--r--. 1 root root 70 Jan 18 11:33 quadlet-pod-pod.pod
-rw-r--r--. 1 root root 230 Jan 18 11:33 quadlet-pod-container.container
drwxr-xr-x. 2 root root 72 Jan 18 11:33 ./
+ /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log
quadlet-generator[16119]: Loading source unit file /etc/containers/systemd/quadlet-pod-container.container
quadlet-generator[16119]: Loading source unit file /etc/containers/systemd/quadlet-pod-pod.pod
---quadlet-pod-container.service---
#
# Ansible managed
#
# system_role:podman
[Install]
WantedBy=default.target
[X-Container]
Image=quay.io/libpod/testimage:20210610
ContainerName=quadlet-pod-container
Pod=quadlet-pod-pod.pod
Exec=/bin/busybox-extras httpd -f -p 80
[Unit]
Wants=network-online.target
After=network-online.target
SourcePath=/etc/containers/systemd/quadlet-pod-container.container
RequiresMountsFor=%t/containers
BindsTo=quadlet-pod-pod-pod.service
After=quadlet-pod-pod-pod.service
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
KillMode=mixed
ExecStop=/usr/bin/podman rm -v -f -i --cidfile=%t/%N.cid
ExecStopPost=-/usr/bin/podman rm -v -f -i --cidfile=%t/%N.cid
Delegate=yes
Type=notify
NotifyAccess=all
SyslogIdentifier=%N
ExecStart=/usr/bin/podman run --name quadlet-pod-container --cidfile=%t/%N.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file %t/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80
---quadlet-pod-pod-pod.service---
#
# Ansible managed
#
# system_role:podman
[X-Pod]
PodName=quadlet-pod
[Unit]
Wants=network-online.target
After=network-online.target
SourcePath=/etc/containers/systemd/quadlet-pod-pod.pod
RequiresMountsFor=%t/containers
Wants=quadlet-pod-container.service
Before=quadlet-pod-container.service
[Service]
SyslogIdentifier=%N
ExecStart=/usr/bin/podman pod start --pod-id-file=%t/%N.pod-id
ExecStop=/usr/bin/podman pod stop --pod-id-file=%t/%N.pod-id --ignore --time=10
ExecStopPost=/usr/bin/podman pod rm --pod-id-file=%t/%N.pod-id --ignore --force
ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile=%t/%N.pid --pod-id-file=%t/%N.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod
Environment=PODMAN_SYSTEMD_UNIT=%n
Type=forking
Restart=on-failure
PIDFile=%t/%N.pid
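The generated units above spell out the dependency mechanics behind the failure: the container unit carries BindsTo= and After= on quadlet-pod-pod-pod.service, and the pod unit carries Wants=/Before= back to the container unit, so starting quadlet-pod-container.service enqueues a start job for the pod unit, which must get through its ExecStartPre= (podman pod create) and ExecStart= (podman pod start) before the container job may run. A sketch for reproducing those two steps by hand to surface the underlying podman error; the commands simply mirror the Exec lines above with the unit specifiers expanded for the system manager (%t expands to /run, %N to quadlet-pod-pod-pod), not a documented debug procedure:

# Show the pod unit exactly as systemd resolved it after daemon-reload
systemctl cat quadlet-pod-pod-pod.service --no-pager

# Re-run the pod unit's ExecStartPre= and ExecStart= by hand; a failure
# here is the root cause of the "dependency job failed" error above
/usr/bin/podman pod create \
    --infra-conmon-pidfile=/run/quadlet-pod-pod-pod.pid \
    --pod-id-file=/run/quadlet-pod-pod-pod.pod-id \
    --exit-policy=stop --replace \
    --infra-name quadlet-pod-infra --name quadlet-pod
/usr/bin/podman pod start --pod-id-file=/run/quadlet-pod-pod-pod.pod-id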
TASK [Check AVCs] ************************************************************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:146 Saturday 18 January 2025 11:33:50 -0500 (0:00:01.018) 0:00:25.684 ****** ok: [managed-node1] => { "changed": false, "cmd": [ "grep", "type=AVC", "/var/log/audit/audit.log" ], "delta": "0:00:00.004995", "end": "2025-01-18 11:33:51.094331", "failed_when_result": false, "rc": 0, "start": "2025-01-18 11:33:51.089336" } STDOUT:
type=AVC msg=audit(1737217676.345:597): avc: denied { read } for pid=7749 comm="systemd-ssh-gen" name="vsock" dev="devtmpfs" ino=264 scontext=system_u:system_r:init_t:s0 tcontext=system_u:object_r:vsock_device_t:s0 tclass=chr_file permissive=0
type=AVC msg=audit(1737217686.500:938): avc: denied { read } for pid=9445 comm="systemd-ssh-gen" name="vsock" dev="devtmpfs" ino=264 scontext=system_u:system_r:init_t:s0 tcontext=system_u:object_r:vsock_device_t:s0 tclass=chr_file permissive=0
type=AVC msg=audit(1737217686.966:964): avc: denied { read } for pid=9507 comm="systemd-ssh-gen" name="vsock" dev="devtmpfs" ino=264 scontext=system_u:system_r:init_t:s0 tcontext=system_u:object_r:vsock_device_t:s0 tclass=chr_file permissive=0
type=AVC msg=audit(1737217689.414:1091): avc: denied { read } for pid=9963 comm="systemd-ssh-gen" name="vsock" dev="devtmpfs" ino=264 scontext=system_u:system_r:init_t:s0 tcontext=system_u:object_r:vsock_device_t:s0 tclass=chr_file permissive=0
type=AVC msg=audit(1737217703.610:1683): avc: denied { read write } for pid=12581 comm="entrypoint.sh" path="/dev/null" dev="tmpfs" ino=5 scontext=system_u:system_r:container_t:s0:c645,c999 tcontext=system_u:object_r:container_file_t:s0:c645,c999 tclass=chr_file permissive=0
type=AVC msg=audit(1737217703.611:1684): avc: denied { read } for pid=12581 comm="entrypoint.sh" path="/lib/ld-musl-x86_64.so.1" dev="xvda2" ino=486539486 scontext=system_u:system_r:container_t:s0:c645,c999 tcontext=unconfined_u:object_r:var_lib_t:s0 tclass=file permissive=0
type=AVC msg=audit(1737217703.611:1685): avc: denied { read } for pid=12581 comm="entrypoint.sh" path="/bin/busybox" dev="xvda2" ino=335544544 scontext=system_u:system_r:container_t:s0:c645,c999 tcontext=unconfined_u:object_r:var_lib_t:s0 tclass=file permissive=0
type=AVC msg=audit(1737218018.922:2091): avc: denied { read } for pid=14418 comm="systemd-ssh-gen" name="vsock" dev="devtmpfs" ino=264 scontext=system_u:system_r:init_t:s0 tcontext=system_u:object_r:vsock_device_t:s0 tclass=chr_file permissive=0
type=AVC msg=audit(1737218020.266:2153): avc: denied { transition } for pid=14662 comm="3" path="/catatonit" dev="overlay" ino=549454047 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:container_t:s0:c116,c615 tclass=process permissive=0
type=AVC msg=audit(1737218021.025:2171): avc: denied { transition } for pid=14761 comm="3" path="/catatonit" dev="overlay" ino=549454047 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:container_t:s0:c63,c1019 tclass=process permissive=0
type=AVC msg=audit(1737218021.834:2212): avc: denied { transition } for pid=14966 comm="3" path="/catatonit" dev="overlay" ino=549454047 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:container_t:s0:c109,c118 tclass=process permissive=0
type=AVC msg=audit(1737218022.578:2223): avc: denied { transition } for pid=15041 comm="3" path="/catatonit" dev="overlay" ino=549454047 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:container_t:s0:c160,c753 tclass=process permissive=0
type=AVC msg=audit(1737218023.365:2257): avc: denied { transition } for pid=15215 comm="3" path="/catatonit" dev="overlay" ino=549454047 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:container_t:s0:c459,c516 tclass=process permissive=0
type=AVC msg=audit(1737218028.987:2380): avc: denied { read } for pid=15846 comm="systemd-ssh-gen" name="vsock" dev="devtmpfs" ino=264 scontext=system_u:system_r:init_t:s0 tcontext=system_u:object_r:vsock_device_t:s0 tclass=chr_file permissive=0
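Two groups of denials above matter differently. The systemd-ssh-gen/vsock denials recur throughout the log against init_t and look unrelated to podman, and the entrypoint.sh records predate this playbook run (which started at 11:33:25). The five { transition } denials are the interesting ones: their timestamps (audit serials 2153 through 2257, epoch 1737218020-1737218023, roughly 11:33:40-11:33:43 local time) fall inside the test window, and they block the transition into container_t for /catatonit, the init binary of podman's infra container, which is exactly what the pod unit's start sequence has to launch; a run of five is consistent with the pod unit's Restart=on-failure and StartLimitBurst=5 shown earlier. A sketch for isolating and interpreting those records with standard audit tooling (assuming the audit and policycoreutils-python-utils packages are present on managed-node1):

# Pull just the AVC records from the failure window and ask why they fired
ausearch -m avc -ts 11:33:40 -te 11:33:45
ausearch -m avc -ts 11:33:40 -te 11:33:45 | audit2why

# Quick sanity check on the enforcing state
getenforce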
TASK [Dump journal] ************************************************************ task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:151 Saturday 18 January 2025 11:33:51 -0500 (0:00:00.405) 0:00:26.089 ****** fatal: [managed-node1]: FAILED! => { "changed": false, "cmd": [ "journalctl", "-ex" ], "delta": "0:00:00.028061", "end": "2025-01-18 11:33:51.525278", "failed_when_result": true, "rc": 0, "start": "2025-01-18 11:33:51.497217" } STDOUT: Jan 18 11:23:39 localhost systemd-logind[661]: Watching system buttons on /dev/input/event2 (AT Translated Set 2 keyboard) Jan 18 11:23:39 localhost systemd[1]: Started systemd-logind.service - User Login Management. ░░ Subject: A start job for unit systemd-logind.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-logind.service has finished successfully. ░░ ░░ The job identifier is 275. Jan 18 11:23:40 localhost chronyd[673]: chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +NTS +SECHASH +IPV6 +DEBUG) Jan 18 11:23:40 localhost chronyd[673]: Frequency 0.000 +/- 1000000.000 ppm read from /var/lib/chrony/drift Jan 18 11:23:40 localhost chronyd[673]: Loaded seccomp filter (level 2) Jan 18 11:23:40 localhost systemd[1]: Started chronyd.service - NTP client/server. ░░ Subject: A start job for unit chronyd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit chronyd.service has finished successfully. ░░ ░░ The job identifier is 226. Jan 18 11:23:40 localhost rngd[660]: Disabling 7: PKCS11 Entropy generator (pkcs11) Jan 18 11:23:40 localhost rngd[660]: Disabling 5: NIST Network Entropy Beacon (nist) Jan 18 11:23:40 localhost rngd[660]: Disabling 9: Qrypt quantum entropy beacon (qrypt) Jan 18 11:23:40 localhost rngd[660]: Disabling 10: Named pipe entropy input (namedpipe) Jan 18 11:23:40 localhost rngd[660]: Initializing available sources Jan 18 11:23:40 localhost rngd[660]: [hwrng ]: Initialization Failed Jan 18 11:23:40 localhost rngd[660]: [rdrand]: Enabling RDRAND rng support Jan 18 11:23:40 localhost rngd[660]: [rdrand]: Initialized Jan 18 11:23:40 localhost rngd[660]: [jitter]: JITTER timeout set to 5 sec Jan 18 11:23:40 localhost rngd[660]: [jitter]: Initializing AES buffer Jan 18 11:23:43 localhost cloud-init[680]: Cloud-init v. 24.1.4-21.el10 running 'init-local' at Sat, 18 Jan 2025 16:23:43 +0000. Up 17.00 seconds. Jan 18 11:23:43 localhost dhcpcd[682]: dhcpcd-10.0.6 starting Jan 18 11:23:44 localhost kernel: 8021q: 802.1Q VLAN Support v1.8 Jan 18 11:23:44 localhost systemd[1]: Listening on systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch. ░░ Subject: A start job for unit systemd-rfkill.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-rfkill.socket has finished successfully. ░░ ░░ The job identifier is 309.
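The journal dump above starts at boot (11:23), so nearly everything that follows is unrelated provisioning noise (chronyd, rngd, dhcpcd, cloud-init, NetworkManager) rather than evidence about the quadlet failure at 11:33. When reading a dump like this, it is usually faster to re-query the journal for just the two quadlet units; a sketch using standard journalctl options:

# Current boot only, just the two quadlet units, with the explanatory -x catalog text
journalctl -b -x -u quadlet-pod-pod-pod.service -u quadlet-pod-container.service --no-pager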
Jan 18 11:23:44 localhost kernel: cfg80211: Loading compiled-in X.509 certificates for regulatory database Jan 18 11:23:44 localhost kernel: Loaded X.509 cert 'sforshee: 00b28ddf47aef9cea7' Jan 18 11:23:44 localhost kernel: Loaded X.509 cert 'wens: 61c038651aabdcf94bd0ac7ff06c7248db18c600' Jan 18 11:23:44 localhost kernel: platform regulatory.0: Direct firmware load for regulatory.db failed with error -2 Jan 18 11:23:44 localhost kernel: cfg80211: failed to load regulatory.db Jan 18 11:23:44 localhost dhcpcd[685]: DUID 00:01:00:01:2f:1e:92:10:0a:ff:d9:eb:f1:55 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: IAID d9:eb:f1:55 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: soliciting a DHCP lease Jan 18 11:23:44 localhost dhcpcd[685]: eth0: offered 10.31.12.161 from 10.31.12.1 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: leased 10.31.12.161 for 3600 seconds Jan 18 11:23:44 localhost dhcpcd[685]: eth0: adding route to 10.31.12.0/22 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: adding default route via 10.31.12.1 Jan 18 11:23:44 localhost dhcpcd[685]: control command: /usr/sbin/dhcpcd --dumplease --ipv4only eth0 Jan 18 11:23:45 localhost rngd[660]: [jitter]: Unable to obtain AES key, disabling JITTER source Jan 18 11:23:45 localhost rngd[660]: [jitter]: Initialization Failed Jan 18 11:23:45 localhost rngd[660]: Process privileges have been dropped to 2:2 Jan 18 11:23:45 localhost systemd[1]: Starting systemd-hostnamed.service - Hostname Service... ░░ Subject: A start job for unit systemd-hostnamed.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has begun execution. ░░ ░░ The job identifier is 318. Jan 18 11:23:45 localhost systemd[1]: Started systemd-hostnamed.service - Hostname Service. ░░ Subject: A start job for unit systemd-hostnamed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has finished successfully. ░░ ░░ The job identifier is 318. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-hostnamed[704]: Hostname set to <ip-10-31-12-161.us-east-1.aws.redhat.com> (static) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init-local.service - Initial cloud-init job (pre-networking). ░░ Subject: A start job for unit cloud-init-local.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init-local.service has finished successfully. ░░ ░░ The job identifier is 271. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target network-pre.target - Preparation for Network. ░░ Subject: A start job for unit network-pre.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network-pre.target has finished successfully. ░░ ░░ The job identifier is 153. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager.service - Network Manager... ░░ Subject: A start job for unit NetworkManager.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager.service has begun execution. ░░ ░░ The job identifier is 218. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4761] NetworkManager (version 1.51.5-1.el10) is starting...
(boot:a3460bff-606e-4f55-935e-5075ab026950) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4763] Read config: /etc/NetworkManager/NetworkManager.conf, /etc/NetworkManager/conf.d/30-cloud-init-ip6-addr-gen-mode.conf Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4890] manager[0x55800d0bdac0]: monitoring kernel firmware directory '/lib/firmware'. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4924] hostname: hostname: using hostnamed Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4924] hostname: static hostname changed from (none) to "ip-10-31-12-161.us-east-1.aws.redhat.com" Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4928] dns-mgr: init: dns=default,systemd-resolved rc-manager=symlink (auto) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4932] manager[0x55800d0bdac0]: rfkill: Wi-Fi hardware radio set enabled Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4932] manager[0x55800d0bdac0]: rfkill: WWAN hardware radio set enabled Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4991] manager: rfkill: Wi-Fi enabled by radio killswitch; enabled by state file Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4992] manager: rfkill: WWAN enabled by radio killswitch; enabled by state file Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4992] manager: Networking is enabled by state file Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5008] settings: Loaded settings plugin: keyfile (internal) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5058] dhcp: init: Using DHCP client 'internal' Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5061] manager: (lo): new Loopback device (/org/freedesktop/NetworkManager/Devices/1) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5076] device (lo): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 396. 
Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5105] device (lo): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5122] device (lo): Activation: starting connection 'lo' (b94848ee-129e-4cfa-9ba4-69218c56ce3f) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5132] manager: (eth0): new Ethernet device (/org/freedesktop/NetworkManager/Devices/2) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5140] device (eth0): state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager.service - Network Manager. ░░ Subject: A start job for unit NetworkManager.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager.service has finished successfully. ░░ ░░ The job identifier is 218. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5155] bus-manager: acquired D-Bus service "org.freedesktop.NetworkManager" Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5159] device (lo): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5171] device (lo): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target network.target - Network. ░░ Subject: A start job for unit network.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network.target has finished successfully. ░░ ░░ The job identifier is 220. 
Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5178] device (lo): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5181] device (eth0): carrier: link connected Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5190] device (lo): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5195] device (eth0): state change: unavailable -> disconnected (reason 'carrier-changed', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5206] policy: auto-activating connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5211] device (eth0): Activation: starting connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5212] device (eth0): state change: disconnected -> prepare (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5214] manager: NetworkManager state is now CONNECTING Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5215] device (eth0): state change: prepare -> config (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5224] device (eth0): state change: config -> ip-config (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5237] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-wait-online.service - Network Manager Wait Online... ░░ Subject: A start job for unit NetworkManager-wait-online.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-wait-online.service has begun execution. ░░ ░░ The job identifier is 217. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5276] dhcp4 (eth0): state changed new lease, address=10.31.12.161, acd pending Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting gssproxy.service - GSSAPI Proxy Daemon... ░░ Subject: A start job for unit gssproxy.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit gssproxy.service has begun execution. ░░ ░░ The job identifier is 248. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7058] dhcp4 (eth0): state changed new lease, address=10.31.12.161 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7069] policy: set 'cloud-init eth0' (eth0) as default for IPv4 routing and DNS Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7178] device (eth0): state change: ip-config -> ip-check (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started gssproxy.service - GSSAPI Proxy Daemon. 
░░ Subject: A start job for unit gssproxy.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit gssproxy.service has finished successfully. ░░ ░░ The job identifier is 248. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: rpc-gssd.service - RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab). ░░ Subject: A start job for unit rpc-gssd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-gssd.service has finished successfully. ░░ ░░ The job identifier is 249. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target nfs-client.target - NFS client services. ░░ Subject: A start job for unit nfs-client.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit nfs-client.target has finished successfully. ░░ ░░ The job identifier is 244. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs-pre.target - Preparation for Remote File Systems. ░░ Subject: A start job for unit remote-fs-pre.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-fs-pre.target has finished successfully. ░░ ░░ The job identifier is 246. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target remote-cryptsetup.target - Remote Encrypted Volumes. ░░ Subject: A start job for unit remote-cryptsetup.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-cryptsetup.target has finished successfully. ░░ ░░ The job identifier is 273. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs.target - Remote File Systems. ░░ Subject: A start job for unit remote-fs.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-fs.target has finished successfully. ░░ ░░ The job identifier is 243. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: systemd-pcrphase.service - TPM PCR Barrier (User) was skipped because of an unmet condition check (ConditionSecurity=measured-uki). ░░ Subject: A start job for unit systemd-pcrphase.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-pcrphase.service has finished successfully. ░░ ░░ The job identifier is 187. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 396. 
Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7494] device (lo): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7499] device (lo): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7513] device (lo): Activation: successful, device activated. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7527] device (eth0): state change: ip-check -> secondaries (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7529] device (eth0): state change: secondaries -> activated (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7532] manager: NetworkManager state is now CONNECTED_SITE Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7534] device (eth0): Activation: successful, device activated. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7539] manager: NetworkManager state is now CONNECTED_GLOBAL Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7541] manager: startup complete Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished NetworkManager-wait-online.service - Network Manager Wait Online. ░░ Subject: A start job for unit NetworkManager-wait-online.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-wait-online.service has finished successfully. ░░ ░░ The job identifier is 217. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting cloud-init.service - Initial cloud-init job (metadata service crawler)... ░░ Subject: A start job for unit cloud-init.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.service has begun execution. ░░ ░░ The job identifier is 272. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.11.160.238 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.18.100.10 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.2.32.37 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.2.32.38 Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Cloud-init v. 24.1.4-21.el10 running 'init' at Sat, 18 Jan 2025 16:23:46 +0000. Up 19.25 seconds. 
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: ++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++++++++++ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | Device | Up | Address | Mask | Scope | Hw-Address | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | eth0 | True | 10.31.12.161 | 255.255.252.0 | global | 0a:ff:d9:eb:f1:55 | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | eth0 | True | fe80::8ff:d9ff:feeb:f155/64 | . | link | 0a:ff:d9:eb:f1:55 | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | lo | True | 127.0.0.1 | 255.0.0.0 | host | . | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | lo | True | ::1/128 | . | host | . | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: ++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+------------+---------------+-----------+-------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | Route | Destination | Gateway | Genmask | Interface | Flags | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+------------+---------------+-----------+-------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 0 | 0.0.0.0 | 10.31.12.1 | 0.0.0.0 | eth0 | UG | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 1 | 10.31.12.0 | 0.0.0.0 | 255.255.252.0 | eth0 | U | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+------------+---------------+-----------+-------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +++++++++++++++++++Route IPv6 info+++++++++++++++++++ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+---------+-----------+-------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | Route | Destination | Gateway | Interface | Flags | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+---------+-----------+-------+ Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 0 | fe80::/64 | :: | eth0 | U | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 2 | multicast | :: | eth0 | U | Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+---------+-----------+-------+ Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Generating public/private rsa key pair. 
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your identification has been saved in /etc/ssh/ssh_host_rsa_key Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your public key has been saved in /etc/ssh/ssh_host_rsa_key.pub Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key fingerprint is: Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: SHA256:MSShpDEbP9ogRFzRCb42XAP8rHSpHahFRsKUYAV1Obg root@ip-10-31-12-161.us-east-1.aws.redhat.com Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key's randomart image is: Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +---[RSA 3072]----+ Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |BB%O*.=.. | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |o+o&o* o | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |. *.Ooo o | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ..EoB. o | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ==* . S | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ..o.. | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +----[SHA256]-----+ Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Generating public/private ecdsa key pair. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your identification has been saved in /etc/ssh/ssh_host_ecdsa_key Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your public key has been saved in /etc/ssh/ssh_host_ecdsa_key.pub Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key fingerprint is: Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: SHA256:GCuXMVhO0FORMh2uLos29Hp7hzwKfYDM2sDdOFQyBFA root@ip-10-31-12-161.us-east-1.aws.redhat.com Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key's randomart image is: Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +---[ECDSA 256]---+ Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |ooE+.+oo++ | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | +==.o | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | .. =+. | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |.oo.o O | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |..++o.* S | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | +...= | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |..o.o.o. | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ooo+= . | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | .o+++ o | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +----[SHA256]-----+ Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Generating public/private ed25519 key pair. 
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your identification has been saved in /etc/ssh/ssh_host_ed25519_key Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your public key has been saved in /etc/ssh/ssh_host_ed25519_key.pub Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key fingerprint is: Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: SHA256:BQ2/bBZTO1s7EUC1xyuYfLltQADkjE3Pm1eZdeVIMeM root@ip-10-31-12-161.us-east-1.aws.redhat.com Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key's randomart image is: Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +--[ED25519 256]--+ Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | +*.o+oB.=| Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | *o+..+ O=| Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | . +=oo.E++| Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | o.+*+o+.| Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | S =*.*o. | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | o o =. | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | . o | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | . | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | | Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +----[SHA256]-----+ Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init.service - Initial cloud-init job (metadata service crawler). ░░ Subject: A start job for unit cloud-init.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.service has finished successfully. ░░ ░░ The job identifier is 272. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-config.target - Cloud-config availability. ░░ Subject: A start job for unit cloud-config.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.target has finished successfully. ░░ ░░ The job identifier is 270. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target network-online.target - Network is Online. ░░ Subject: A start job for unit network-online.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network-online.target has finished successfully. ░░ ░░ The job identifier is 216. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting cloud-config.service - Apply the settings specified in cloud-config... ░░ Subject: A start job for unit cloud-config.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.service has begun execution. ░░ ░░ The job identifier is 269. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting kdump.service - Crash recovery kernel arming... ░░ Subject: A start job for unit kdump.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit kdump.service has begun execution. ░░ ░░ The job identifier is 274. 
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting restraintd.service - The restraint harness.... ░░ Subject: A start job for unit restraintd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit restraintd.service has begun execution. ░░ ░░ The job identifier is 229. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting rpc-statd-notify.service - Notify NFS peers of a restart... ░░ Subject: A start job for unit rpc-statd-notify.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-statd-notify.service has begun execution. ░░ ░░ The job identifier is 245. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting rsyslog.service - System Logging Service... ░░ Subject: A start job for unit rsyslog.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rsyslog.service has begun execution. ░░ ░░ The job identifier is 253. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon... ░░ Subject: A start job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 260. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sm-notify[881]: Version 2.8.2 starting Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started rpc-statd-notify.service - Notify NFS peers of a restart. ░░ Subject: A start job for unit rpc-statd-notify.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-statd-notify.service has finished successfully. ░░ ░░ The job identifier is 245. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com (sshd)[883]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[883]: Server listening on 0.0.0.0 port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[883]: Server listening on :: port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon. ░░ Subject: A start job for unit sshd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has finished successfully. ░░ ░░ The job identifier is 260. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started restraintd.service - The restraint harness.. ░░ Subject: A start job for unit restraintd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit restraintd.service has finished successfully. ░░ ░░ The job identifier is 229. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com rsyslogd[882]: [origin software="rsyslogd" swVersion="8.2412.0-1.el10" x-pid="882" x-info="https://www.rsyslog.com"] start Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started rsyslog.service - System Logging Service. 
░░ Subject: A start job for unit rsyslog.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rsyslog.service has finished successfully. ░░ ░░ The job identifier is 253. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com rsyslogd[882]: imjournal: journal files changed, reloading... [v8.2412.0-1.el10 try https://www.rsyslog.com/e/0 ] Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[929]: Cloud-init v. 24.1.4-21.el10 running 'modules:config' at Sat, 18 Jan 2025 16:23:47 +0000. Up 20.91 seconds. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopping sshd.service - OpenSSH server daemon... ░░ Subject: A stop job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 499. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[883]: Received signal 15; terminating. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit sshd.service has successfully entered the 'dead' state. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopped sshd.service - OpenSSH server daemon. ░░ Subject: A stop job for unit sshd.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd.service has finished. ░░ ░░ The job identifier is 499 and the job result is done. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopped target sshd-keygen.target. ░░ Subject: A stop job for unit sshd-keygen.target has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd-keygen.target has finished. ░░ ░░ The job identifier is 583 and the job result is done. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopping sshd-keygen.target... ░░ Subject: A stop job for unit sshd-keygen.target has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd-keygen.target has begun execution. ░░ ░░ The job identifier is 583. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: ssh-host-keys-migration.service - Update OpenSSH host key permissions was skipped because of an unmet condition check (ConditionPathExists=!/var/lib/.ssh-host-keys-migration). ░░ Subject: A start job for unit ssh-host-keys-migration.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit ssh-host-keys-migration.service has finished successfully. ░░ ░░ The job identifier is 582. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ecdsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ecdsa.service has finished successfully. ░░ ░░ The job identifier is 580. 
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ed25519.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ed25519.service has finished successfully. ░░ ░░ The job identifier is 581. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@rsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@rsa.service has finished successfully. ░░ ░░ The job identifier is 578. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target sshd-keygen.target. ░░ Subject: A start job for unit sshd-keygen.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen.target has finished successfully. ░░ ░░ The job identifier is 583. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon... ░░ Subject: A start job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 499. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com (sshd)[933]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[933]: Server listening on 0.0.0.0 port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[933]: Server listening on :: port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon. ░░ Subject: A start job for unit sshd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has finished successfully. ░░ ░░ The job identifier is 499. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-config.service - Apply the settings specified in cloud-config. ░░ Subject: A start job for unit cloud-config.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.service has finished successfully. ░░ ░░ The job identifier is 269. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting cloud-final.service - Execute cloud user/final scripts... ░░ Subject: A start job for unit cloud-final.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-final.service has begun execution. ░░ ░░ The job identifier is 268. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting systemd-user-sessions.service - Permit User Sessions... 
░░ Subject: A start job for unit systemd-user-sessions.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-user-sessions.service has begun execution. ░░ ░░ The job identifier is 255. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished systemd-user-sessions.service - Permit User Sessions. ░░ Subject: A start job for unit systemd-user-sessions.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-user-sessions.service has finished successfully. ░░ ░░ The job identifier is 255. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started crond.service - Command Scheduler. ░░ Subject: A start job for unit crond.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit crond.service has finished successfully. ░░ ░░ The job identifier is 278. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started getty@tty1.service - Getty on tty1. ░░ Subject: A start job for unit getty@tty1.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit getty@tty1.service has finished successfully. ░░ ░░ The job identifier is 241. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started serial-getty@ttyS0.service - Serial Getty on ttyS0. ░░ Subject: A start job for unit serial-getty@ttyS0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit serial-getty@ttyS0.service has finished successfully. ░░ ░░ The job identifier is 236. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target getty.target - Login Prompts. ░░ Subject: A start job for unit getty.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit getty.target has finished successfully. ░░ ░░ The job identifier is 235. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target multi-user.target - Multi-User System. ░░ Subject: A start job for unit multi-user.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit multi-user.target has finished successfully. ░░ ░░ The job identifier is 121. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP... ░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp-runlevel.service has begun execution. ░░ ░░ The job identifier is 257. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-update-utmp-runlevel.service has successfully entered the 'dead' state. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP. 
░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp-runlevel.service has finished successfully. ░░ ░░ The job identifier is 257. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) STARTUP (1.7.0) Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) INFO (Syslog will be used instead of sendmail.) Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 5% if used.) Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) INFO (running with inotify support) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com restraintd[891]: Listening on http://localhost:8081 Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[966]: Cloud-init v. 24.1.4-21.el10 running 'modules:final' at Sat, 18 Jan 2025 16:23:48 +0000. Up 21.37 seconds. Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1001]: ############################################################# Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1002]: -----BEGIN SSH HOST KEY FINGERPRINTS----- Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1004]: 256 SHA256:GCuXMVhO0FORMh2uLos29Hp7hzwKfYDM2sDdOFQyBFA root@ip-10-31-12-161.us-east-1.aws.redhat.com (ECDSA) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1006]: 256 SHA256:BQ2/bBZTO1s7EUC1xyuYfLltQADkjE3Pm1eZdeVIMeM root@ip-10-31-12-161.us-east-1.aws.redhat.com (ED25519) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1010]: 3072 SHA256:MSShpDEbP9ogRFzRCb42XAP8rHSpHahFRsKUYAV1Obg root@ip-10-31-12-161.us-east-1.aws.redhat.com (RSA) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1012]: -----END SSH HOST KEY FINGERPRINTS----- Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1015]: ############################################################# Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: Detected change(s) in the following file(s): /etc/fstab Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[966]: Cloud-init v. 24.1.4-21.el10 finished at Sat, 18 Jan 2025 16:23:48 +0000. Datasource DataSourceEc2Local. Up 21.51 seconds Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-final.service - Execute cloud user/final scripts. ░░ Subject: A start job for unit cloud-final.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-final.service has finished successfully. ░░ ░░ The job identifier is 268. Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-init.target - Cloud-init target. ░░ Subject: A start job for unit cloud-init.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.target has finished successfully. ░░ ░░ The job identifier is 267. 
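The sshd-keygen@ecdsa/ed25519/rsa units a few entries up were skipped because their ConditionPathExists=! checks failed once cloud-init took over host-key generation. systemd's condition here is just an optionally negated path-existence test; a rough sketch of those semantics (an approximation, not systemd's actual code):

```python
import os

def condition_path_exists(expr: str) -> bool:
    """Approximate systemd's ConditionPathExists= semantics:
    a leading '!' negates the existence test (sketch only)."""
    negate = expr.startswith("!")
    exists = os.path.exists(expr.lstrip("!"))
    return exists != negate

# The check logged for the skipped sshd-keygen@*.service units above:
# condition_path_exists(
#     "!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target")
```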
Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 0 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 0 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 48 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 48 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 49 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 49 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 50 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 50 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 51 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 51 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 52 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 52 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 53 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 53 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 54 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 54 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 55 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 55 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 56 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 56 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 57 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 57 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 58 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 58 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 59 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 59 affinity is now unmanaged Jan 18 11:23:51 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Selected source 10.2.32.38 Jan 18 11:23:52 ip-10-31-12-161.us-east-1.aws.redhat.com kernel: block xvda: the capability attribute has been deprecated. 
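irqbalance steers interrupts by writing CPU bitmasks to /proc/irq/&lt;n&gt;/smp_affinity; the "Permission denied" run above is the kernel refusing such writes for interrupts userspace may not rebalance (IRQ 0 is the legacy timer), after which irqbalance marks them unmanaged. A read-only sketch of the same interface:

```python
from pathlib import Path

def irq_affinity(irq: int) -> str:
    """Current CPU affinity bitmask for an IRQ, as a hex string.

    irqbalance writes a new mask to this same file; for IRQs the
    kernel refuses to rebalance (such as IRQ 0 above) the write
    fails and the IRQ is left unmanaged.
    """
    return Path(f"/proc/irq/{irq}/smp_affinity").read_text().strip()

# for irq in (0, 48, 59):
#     print(irq, irq_affinity(irq))
```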
Jan 18 11:23:52 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: Rebuilding /boot/initramfs-6.12.0-38.el10.x86_64kdump.img Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1398]: dracut-103-1.el10 Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1401]: Executing: /usr/bin/dracut --list-modules Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1473]: dracut-103-1.el10 Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Executing: /usr/bin/dracut --add kdumpbase --quiet --hostonly --hostonly-cmdline --hostonly-i18n --hostonly-mode strict --hostonly-nics --aggressive-strip --omit "rdma plymouth resume ifcfg earlykdump" --mount "/dev/disk/by-uuid/5421f911-fafd-4f0d-bf2e-2916252992eb /sysroot xfs rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota" --add squash-squashfs --squash-compressor zstd --no-hostonly-default-device -f /boot/initramfs-6.12.0-38.el10.x86_64kdump.img 6.12.0-38.el10.x86_64 Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd-wait-online' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'busybox' will not be installed, because command 'busybox' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmanctl' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found! 
Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'ifcfg' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'plymouth' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'multipath' will not be installed, because command 'multipath' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'crypt-gpg' will not be installed, because command 'gpg' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'pcsc' will not be installed, because command 'pcscd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'nvmf' will not be installed, because command 'nvme' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'resume' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'biosdevname' will not be installed, because command 'biosdevname' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'earlykdump' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found! 
Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'busybox' will not be installed, because command 'busybox' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmanctl' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'multipath' will not be installed, because command 'multipath' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'crypt-gpg' will not be installed, because command 'gpg' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'pcsc' will not be installed, because command 'pcscd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'nvmf' will not be installed, because command 'nvme' could not be found! 
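This long run of skip messages (it continues with two more squash-erofs lines below) is dracut's required-binaries check: a module is dropped unless every command it declares resolves on the host. A sketch of that logic, with a module/command set taken from the log; the function name is invented, and this is the idea rather than dracut's shell implementation:

```python
import shutil

def module_installable(module: str, required: list[str]) -> bool:
    """dracut-style check: skip a module unless every required
    command is found on PATH (sketch of the idea, not dracut code)."""
    ok = True
    for cmd in required:
        if shutil.which(cmd) is None:
            print(f"Module '{module}' will not be installed, "
                  f"because command '{cmd}' could not be found!")
            ok = False
    return ok

# module_installable("iscsi", ["iscsi-iname", "iscsiadm", "iscsid"])
```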
Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fips *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fips-crypto-policies *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-ask-password *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-initrd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-journald *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-modules-load *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-sysctl *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-sysusers *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-tmpfiles *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-udevd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: rngd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: i18n *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: drm *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: prefixdevname *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: kernel-modules *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. 
Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: kernel-modules-extra *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: configuration source "/run/depmod.d" does not exist Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: configuration source "/lib/depmod.d" does not exist Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: parsing configuration file "/etc/depmod.d/dist.conf" Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: /etc/depmod.d/dist.conf: added "updates extra built-in weak-updates" to the list of search directories Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: pcmcia *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Skipping udev rule: 60-pcmcia.rules Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fstab-sys *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: hwdb *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: rootfs-block *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: squash-squashfs *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: terminfo *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: udev-rules *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: dracut-systemd *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: usrmount *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: base *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fs-lib *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: kdumpbase *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: memstrack *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: microcode_ctl-fw_dir_override *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl module: mangling fw_dir Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: reset fw_dir to "/lib/firmware/updates /lib/firmware" Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel"... Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: intel: caveats check for kernel version "6.12.0-38.el10.x86_64" passed, adding "/usr/share/microcode_ctl/ucode_with_caveats/intel" to fw_dir variable Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel-06-4f-01"... 
Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: configuration "intel-06-4f-01" is ignored Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: final fw_dir: "/usr/share/microcode_ctl/ucode_with_caveats/intel /lib/firmware/updates /lib/firmware" Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: shutdown *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: squash-lib *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including modules done *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Installing kernel module dependencies *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Installing kernel module dependencies done *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Resolving executable dependencies *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Resolving executable dependencies done *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Hardlinking files *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Mode: real Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Method: sha256 Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Files: 548 Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Linked: 25 files Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Compared: 0 xattrs Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Compared: 53 files Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Saved: 13.6 MiB Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Duration: 0.174529 seconds Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Hardlinking files done *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Generating early-microcode cpio image *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Constructing GenuineIntel.bin *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Constructing GenuineIntel.bin *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Store current command line parameters *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Stored kernel commandline: Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: No dracut internal kernel commandline stored in the initramfs Jan 18 11:23:59 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Squashing the files inside the initramfs *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Squashing the files inside the initramfs done *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Creating image file '/boot/initramfs-6.12.0-38.el10.x86_64kdump.img' *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Creating initramfs image file '/boot/initramfs-6.12.0-38.el10.x86_64kdump.img' done *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com kernel: PKCS7: Message signed outside of X.509 validity window Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: kexec: loaded kdump kernel Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: 
Starting kdump: [OK] Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: Notice: No vmcore creation test performed! Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished kdump.service - Crash recovery kernel arming. ░░ Subject: A start job for unit kdump.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit kdump.service has finished successfully. ░░ ░░ The job identifier is 274. Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Startup finished in 999ms (kernel) + 4.893s (initrd) + 35.521s (userspace) = 41.414s. ░░ Subject: System start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ All system services necessary queued for starting at boot have been ░░ started. Note that this does not mean that the machine is now idle as services ░░ might still be busy with completing start-up. ░░ ░░ Kernel start-up required 999279 microseconds. ░░ ░░ Initrd start-up required 4893825 microseconds. ░░ ░░ Userspace start-up required 35521236 microseconds. Jan 18 11:24:15 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: systemd-hostnamed.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-hostnamed.service has successfully entered the 'dead' state. Jan 18 11:24:57 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Selected source 24.187.197.51 (2.centos.pool.ntp.org) Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4336]: Accepted publickey for root from 10.30.32.164 port 60662 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Created slice user-0.slice - User Slice of UID 0. ░░ Subject: A start job for unit user-0.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-0.slice has finished successfully. ░░ ░░ The job identifier is 662. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting user-runtime-dir@0.service - User Runtime Directory /run/user/0... ░░ Subject: A start job for unit user-runtime-dir@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@0.service has begun execution. ░░ ░░ The job identifier is 584. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 1 of user root. ░░ Subject: A new session 1 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 1 has been created for the user root. ░░ ░░ The leading process of the session is 4336. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished user-runtime-dir@0.service - User Runtime Directory /run/user/0. ░░ Subject: A start job for unit user-runtime-dir@0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@0.service has finished successfully. ░░ ░░ The job identifier is 584. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting user@0.service - User Manager for UID 0... 
░░ Subject: A start job for unit user@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@0.service has begun execution. ░░ ░░ The job identifier is 664. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 2 of user root. ░░ Subject: A new session 2 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 2 has been created for the user root. ░░ ░░ The leading process of the session is 4341. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com (systemd)[4341]: pam_unix(systemd-user:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Queued start job for default target default.target. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Created slice app.slice - User Application Slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 9. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: grub-boot-success.timer - Mark boot as successful after the user session has run 2 minutes was skipped because of an unmet condition check (ConditionUser=!@system). ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 6. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Started systemd-tmpfiles-clean.timer - Daily Cleanup of User's Temporary Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 4. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target paths.target - Paths. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 12. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target timers.target - Timers. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 3. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Starting dbus.socket - D-Bus User Message Bus Socket... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 8. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Starting systemd-tmpfiles-setup.service - Create User Files and Directories... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 11. 
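The dracut "Hardlinking files" summary further up (Mode: real, Method: sha256, Files: 548, Linked: 25, Saved: 13.6 MiB) comes from a content-deduplication pass over the staged initramfs tree: files with identical SHA-256 digests are collapsed into hard links. A simplified sketch of such a pass; real tools also compare size, owner, and mode before linking, and the staging path below is hypothetical:

```python
import hashlib
import os

def hardlink_identical_files(root: str) -> int:
    """Collapse byte-identical regular files under root into hard links,
    keyed by SHA-256 of their contents (simplified sketch of the idea)."""
    seen: dict[bytes, str] = {}
    linked = 0
    for dirpath, _dirs, names in os.walk(root):
        for name in names:
            path = os.path.join(dirpath, name)
            if os.path.islink(path) or not os.path.isfile(path):
                continue
            with open(path, "rb") as f:
                digest = hashlib.sha256(f.read()).digest()
            if digest in seen:
                os.unlink(path)
                os.link(seen[digest], path)  # later copies become links to the first
                linked += 1
            else:
                seen[digest] = path
    return linked

# linked = hardlink_identical_files("/var/tmp/initramfs-root")  # hypothetical staging dir
```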
Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Finished systemd-tmpfiles-setup.service - Create User Files and Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 11. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Listening on dbus.socket - D-Bus User Message Bus Socket. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 8. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target sockets.target - Sockets. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 7. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target basic.target - Basic System. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 2. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target default.target - Main User Target. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 1. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Startup finished in 116ms. ░░ Subject: User manager start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The user manager instance for user 0 has been started. All services queued ░░ for starting have been started. Note that other services might still be starting ░░ up or be started at any later time. ░░ ░░ Startup of the manager took 116963 microseconds. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started user@0.service - User Manager for UID 0. ░░ Subject: A start job for unit user@0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@0.service has finished successfully. ░░ ░░ The job identifier is 664. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started session-1.scope - Session 1 of User root. ░░ Subject: A start job for unit session-1.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-1.scope has finished successfully. ░░ ░░ The job identifier is 745. 
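Both "Startup finished" entries carry their exact microsecond figures in the catalog text, and those reduce to the headline numbers:

```python
# Microsecond figures from the two "Startup finished" entries above.
kernel, initrd, userspace = 999_279, 4_893_825, 35_521_236
print(f"{(kernel + initrd + userspace) / 1e6:.3f}s")  # 41.414s, the system total
print(f"{116_963 // 1000}ms")                         # 116ms, the user manager's figure
```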
Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4336]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4352]: Received disconnect from 10.30.32.164 port 60662:11: disconnected by user Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4352]: Disconnected from user root 10.30.32.164 port 60662 Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4336]: pam_unix(sshd:session): session closed for user root Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Session 1 logged out. Waiting for processes to exit. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: session-1.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit session-1.scope has successfully entered the 'dead' state. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Removed session 1. ░░ Subject: Session 1 has been terminated ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A session with the ID 1 has been terminated. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4388]: Accepted publickey for root from 10.31.11.42 port 54246 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4389]: Accepted publickey for root from 10.31.11.42 port 54248 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 3 of user root. ░░ Subject: A new session 3 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 3 has been created for the user root. ░░ ░░ The leading process of the session is 4388. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started session-3.scope - Session 3 of User root. ░░ Subject: A start job for unit session-3.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-3.scope has finished successfully. ░░ ░░ The job identifier is 827. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 4 of user root. ░░ Subject: A new session 4 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 4 has been created for the user root. ░░ ░░ The leading process of the session is 4389. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started session-4.scope - Session 4 of User root. ░░ Subject: A start job for unit session-4.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-4.scope has finished successfully. ░░ ░░ The job identifier is 909. 
Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4388]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4389]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4395]: Received disconnect from 10.31.11.42 port 54248:11: disconnected by user Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4395]: Disconnected from user root 10.31.11.42 port 54248 Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4389]: pam_unix(sshd:session): session closed for user root Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: session-4.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit session-4.scope has successfully entered the 'dead' state. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Session 4 logged out. Waiting for processes to exit. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Removed session 4. ░░ Subject: Session 4 has been terminated ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A session with the ID 4 has been terminated. Jan 18 11:27:05 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting systemd-hostnamed.service - Hostname Service... ░░ Subject: A start job for unit systemd-hostnamed.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has begun execution. ░░ ░░ The job identifier is 991. Jan 18 11:27:05 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started systemd-hostnamed.service - Hostname Service. ░░ Subject: A start job for unit systemd-hostnamed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has finished successfully. ░░ ░░ The job identifier is 991. Jan 18 11:27:05 managed-node1 systemd-hostnamed[5843]: Hostname set to (static) Jan 18 11:27:05 managed-node1 NetworkManager[711]: [1737217625.1812] hostname: static hostname changed from "ip-10-31-12-161.us-east-1.aws.redhat.com" to "managed-node1" Jan 18 11:27:05 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1069. Jan 18 11:27:05 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1069. Jan 18 11:27:15 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. 
Jan 18 11:27:33 managed-node1 sshd-session[6513]: Accepted publickey for root from 10.31.43.51 port 59536 ssh2: RSA SHA256:9j1blwt3wcrRiGYZQ7ZGu9axm3cDklH6/z4c+Ee8CzE Jan 18 11:27:33 managed-node1 systemd-logind[661]: New session 5 of user root. ░░ Subject: A new session 5 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 5 has been created for the user root. ░░ ░░ The leading process of the session is 6513. Jan 18 11:27:33 managed-node1 systemd[1]: Started session-5.scope - Session 5 of User root. ░░ Subject: A start job for unit session-5.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-5.scope has finished successfully. ░░ ░░ The job identifier is 1148. Jan 18 11:27:33 managed-node1 sshd-session[6513]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:27:35 managed-node1 systemd[1]: systemd-hostnamed.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-hostnamed.service has successfully entered the 'dead' state. Jan 18 11:27:35 managed-node1 python3.12[6665]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Jan 18 11:27:36 managed-node1 python3.12[6819]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:27:37 managed-node1 python3.12[6944]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:27:38 managed-node1 sudo[7194]: root : TTY=pts/0 ; PWD=/root ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ecggmjqitbnmvffvmmvciguphmjxzmlw ; /usr/bin/python3.12 /root/.ansible/tmp/ansible-tmp-1737217658.4395213-6967-33253008089585/AnsiballZ_dnf.py' Jan 18 11:27:38 managed-node1 sudo[7194]: pam_unix(sudo:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:27:38 managed-node1 python3.12[7197]: ansible-ansible.legacy.dnf Invoked with name=['iptables-nft', 'podman', 'shadow-utils-subid'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:27:48 managed-node1 kernel: SELinux: Converting 381 SID table entries... 
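The ansible.legacy.dnf invocation above asks for state=present on iptables-nft, podman, and shadow-utils-subid, i.e. an idempotent package install; the SELinux SID-table conversions that follow are the policy reloads triggered by that transaction. Outside Ansible, roughly the same transaction could be driven directly (a sketch, not what the module literally executes):

```python
import subprocess

# Rough command-line equivalent of the logged dnf module invocation
# (state=present; install_weak_deps=True is dnf's default behaviour).
packages = ["iptables-nft", "podman", "shadow-utils-subid"]
subprocess.run(["dnf", "install", "-y", *packages], check=True)
```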
Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability open_perms=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability always_check_network=0 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Jan 18 11:27:50 managed-node1 kernel: SELinux: Converting 382 SID table entries... Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability open_perms=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability always_check_network=0 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Jan 18 11:27:51 managed-node1 setsebool[7282]: The virt_use_nfs policy boolean was changed to 1 by root Jan 18 11:27:51 managed-node1 setsebool[7282]: The virt_sandbox_use_all_caps policy boolean was changed to 1 by root Jan 18 11:27:54 managed-node1 kernel: SELinux: Converting 389 SID table entries... Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability open_perms=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability always_check_network=0 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Jan 18 11:27:54 managed-node1 groupadd[7304]: group added to /etc/group: name=polkitd, GID=114 Jan 18 11:27:54 managed-node1 groupadd[7304]: group added to /etc/gshadow: name=polkitd Jan 18 11:27:54 managed-node1 groupadd[7304]: new group: name=polkitd, GID=114 Jan 18 11:27:54 managed-node1 useradd[7307]: new user: name=polkitd, UID=114, GID=114, home=/, shell=/sbin/nologin, from=none Jan 18 11:27:54 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. 
░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically, ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Jan 18 11:27:55 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. ░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically, ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Jan 18 11:27:55 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. ░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically, ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Jan 18 11:27:55 managed-node1 systemd[1]: Listening on pcscd.socket - PC/SC Smart Card Daemon Activation Socket. ░░ Subject: A start job for unit pcscd.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit pcscd.socket has finished successfully. ░░ ░░ The job identifier is 1233. Jan 18 11:27:56 managed-node1 systemd[1]: Started run-p7706-i8006.service - [systemd-run] /usr/bin/systemctl start man-db-cache-update. ░░ Subject: A start job for unit run-p7706-i8006.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-p7706-i8006.service has finished successfully. ░░ ░░ The job identifier is 1311. Jan 18 11:27:56 managed-node1 systemctl[7707]: Warning: The unit file, source configuration file or drop-ins of man-db-cache-update.service changed on disk. Run 'systemctl daemon-reload' to reload units. Jan 18 11:27:56 managed-node1 systemd[1]: Starting man-db-cache-update.service...
░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1389. Jan 18 11:27:56 managed-node1 systemd[1]: Reload requested from client PID 7710 ('systemctl') (unit session-5.scope)... Jan 18 11:27:56 managed-node1 systemd[1]: Reloading... Jan 18 11:27:56 managed-node1 systemd-ssh-generator[7749]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:27:56 managed-node1 systemd-rc-local-generator[7746]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:27:56 managed-node1 (sd-exec-[7731]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:27:56 managed-node1 systemd[1]: Reloading finished in 266 ms. Jan 18 11:27:56 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units… Jan 18 11:27:56 managed-node1 systemd[1]: Reloading user@0.service - User Manager for UID 0... ░░ Subject: A reload job for unit user@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A reload job for unit user@0.service has begun execution. ░░ ░░ The job identifier is 1467. Jan 18 11:27:56 managed-node1 systemd[4341]: Received SIGRTMIN+25 from PID 1 (systemd). Jan 18 11:27:56 managed-node1 systemd[4341]: Reexecuting. Jan 18 11:27:56 managed-node1 systemd[1]: Reloaded user@0.service - User Manager for UID 0. ░░ Subject: A reload job for unit user@0.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A reload job for unit user@0.service has finished. ░░ ░░ The job identifier is 1467 and the job result is done. Jan 18 11:27:57 managed-node1 sudo[7194]: pam_unix(sudo:session): session closed for user root Jan 18 11:27:58 managed-node1 python3.12[8436]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:27:58 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Jan 18 11:27:58 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1389. Jan 18 11:27:58 managed-node1 systemd[1]: run-p7706-i8006.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-p7706-i8006.service has successfully entered the 'dead' state. 
Jan 18 11:27:59 managed-node1 python3.12[8577]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jan 18 11:27:59 managed-node1 python3.12[8709]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:28:01 managed-node1 python3.12[8842]: ansible-tempfile Invoked with prefix=lsr_ suffix=_podman state=directory path=None Jan 18 11:28:01 managed-node1 python3.12[8973]: ansible-file Invoked with path=/tmp/lsr_pjrhafco_podman/auth state=directory mode=0700 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:02 managed-node1 python3.12[9104]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:28:03 managed-node1 python3.12[9235]: ansible-ansible.legacy.dnf Invoked with name=['python3-pyasn1', 'python3-cryptography', 'python3-dbus'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:28:05 managed-node1 python3.12[9378]: ansible-ansible.legacy.dnf Invoked with name=['certmonger', 'python3-packaging'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:28:06 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. ░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically, ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Jan 18 11:28:06 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload.
░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. ░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically, ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Jan 18 11:28:06 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. ░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically, ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Jan 18 11:28:06 managed-node1 systemd[1]: Reload requested from client PID 9397 ('systemctl') (unit session-5.scope)... Jan 18 11:28:06 managed-node1 systemd[1]: Reloading... Jan 18 11:28:06 managed-node1 systemd-rc-local-generator[9443]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:28:06 managed-node1 systemd-ssh-generator[9445]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:28:06 managed-node1 (sd-exec-[9415]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:28:06 managed-node1 systemd[1]: Reloading finished in 192 ms. Jan 18 11:28:06 managed-node1 systemd[1]: Started run-p9454-i9754.service - [systemd-run] /usr/bin/systemctl start man-db-cache-update. ░░ Subject: A start job for unit run-p9454-i9754.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-p9454-i9754.service has finished successfully. ░░ ░░ The job identifier is 1472. Jan 18 11:28:06 managed-node1 systemd[1]: Starting man-db-cache-update.service... ░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1550. Jan 18 11:28:06 managed-node1 systemd[1]: Reload requested from client PID 9458 ('systemctl') (unit session-5.scope)... Jan 18 11:28:06 managed-node1 systemd[1]: Reloading... Jan 18 11:28:06 managed-node1 systemd-rc-local-generator[9505]: /etc/rc.d/rc.local is not marked executable, skipping.
Jan 18 11:28:06 managed-node1 systemd-ssh-generator[9507]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:28:06 managed-node1 (sd-exec-[9480]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:28:07 managed-node1 systemd[1]: Reloading finished in 336 ms. Jan 18 11:28:07 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units… Jan 18 11:28:07 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Jan 18 11:28:07 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1550. Jan 18 11:28:07 managed-node1 systemd[1]: run-p9454-i9754.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-p9454-i9754.service has successfully entered the 'dead' state. Jan 18 11:28:07 managed-node1 python3.12[9651]: ansible-file Invoked with name=/etc/certmonger//pre-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//pre-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:08 managed-node1 python3.12[9782]: ansible-file Invoked with name=/etc/certmonger//post-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//post-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:09 managed-node1 python3.12[9913]: ansible-ansible.legacy.systemd Invoked with name=certmonger state=started enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None Jan 18 11:28:09 managed-node1 systemd[1]: Reload requested from client PID 9916 ('systemctl') (unit session-5.scope)... Jan 18 11:28:09 managed-node1 systemd[1]: Reloading... Jan 18 11:28:09 managed-node1 systemd-rc-local-generator[9960]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:28:09 managed-node1 systemd-ssh-generator[9963]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:28:09 managed-node1 (sd-exec-[9934]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:28:09 managed-node1 systemd[1]: Reloading finished in 197 ms. Jan 18 11:28:09 managed-node1 systemd[1]: Starting certmonger.service - Certificate monitoring and PKI enrollment... ░░ Subject: A start job for unit certmonger.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit certmonger.service has begun execution. ░░ ░░ The job identifier is 1628. 
Jan 18 11:28:09 managed-node1 (rtmonger)[9971]: certmonger.service: Referenced but unset environment variable evaluates to an empty string: OPTS Jan 18 11:28:09 managed-node1 systemd[1]: Started certmonger.service - Certificate monitoring and PKI enrollment. ░░ Subject: A start job for unit certmonger.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit certmonger.service has finished successfully. ░░ ░░ The job identifier is 1628. Jan 18 11:28:10 managed-node1 python3.12[10129]: ansible-fedora.linux_system_roles.certificate_request Invoked with name=podman_registry dns=['localhost', '127.0.0.1'] directory=/etc/pki/tls wait=True ca=self-sign __header=# # Ansible managed # # system_role:certificate provider_config_directory=/etc/certmonger provider=certmonger key_usage=['digitalSignature', 'keyEncipherment'] extended_key_usage=['id-kp-serverAuth', 'id-kp-clientAuth'] auto_renew=True ip=None email=None common_name=None country=None state=None locality=None organization=None organizational_unit=None contact_email=None key_size=None owner=None group=None mode=None principal=None run_before=None run_after=None Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 
2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[10144]: Certificate in file "/etc/pki/tls/certs/podman_registry.crt" issued by CA and saved. Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:11 managed-node1 python3.12[10275]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt Jan 18 11:28:11 managed-node1 python3.12[10406]: ansible-slurp Invoked with path=/etc/pki/tls/private/podman_registry.key src=/etc/pki/tls/private/podman_registry.key Jan 18 11:28:11 managed-node1 python3.12[10537]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt Jan 18 11:28:12 managed-node1 python3.12[10668]: ansible-ansible.legacy.command Invoked with _raw_params=getcert stop-tracking -f /etc/pki/tls/certs/podman_registry.crt _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:28:12 managed-node1 certmonger[9971]: 2025-01-18 11:28:12 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:12 managed-node1 python3.12[10800]: ansible-file Invoked with path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:13 managed-node1 python3.12[10931]: ansible-file Invoked with path=/etc/pki/tls/private/podman_registry.key state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:13 managed-node1 python3.12[11062]: ansible-file Invoked with 
path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:14 managed-node1 python3.12[11193]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_pjrhafco_podman/auth/registry_cert.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:28:14 managed-node1 python3.12[11298]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_pjrhafco_podman/auth/registry_cert.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1737217693.814509-7893-50960251601282/.source.crt _original_basename=.sf0hyasi follow=False checksum=e2d7e6ef6749810cf6e2d8aae103158b6bc05f37 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:14 managed-node1 python3.12[11429]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_pjrhafco_podman/auth/registry_key.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:28:15 managed-node1 python3.12[11534]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_pjrhafco_podman/auth/registry_key.pem mode=0600 src=/root/.ansible/tmp/ansible-tmp-1737217694.6593502-7925-207039514606112/.source.pem _original_basename=.x4xwg5es follow=False checksum=69c8e519848469056bae04f7cb3e5bd8d039ff36 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:15 managed-node1 python3.12[11665]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_pjrhafco_podman/auth/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:28:16 managed-node1 python3.12[11770]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_pjrhafco_podman/auth/ca.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1737217695.4154303-7964-28971117670208/.source.crt _original_basename=.c_gb9adz follow=False checksum=e2d7e6ef6749810cf6e2d8aae103158b6bc05f37 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:16 managed-node1 python3.12[11901]: ansible-ansible.legacy.dnf Invoked with name=['httpd-tools', 'skopeo'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:28:18 managed-node1 systemd[1]: Started run-p11915-i12215.service - [systemd-run] /usr/bin/systemctl start man-db-cache-update. 
░░ Subject: A start job for unit run-p11915-i12215.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-p11915-i12215.service has finished successfully. ░░ ░░ The job identifier is 1707. Jan 18 11:28:18 managed-node1 systemd[1]: Starting man-db-cache-update.service... ░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1785. Jan 18 11:28:19 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Jan 18 11:28:19 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1785. Jan 18 11:28:19 managed-node1 systemd[1]: run-p11915-i12215.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-p11915-i12215.service has successfully entered the 'dead' state. Jan 18 11:28:20 managed-node1 python3.12[12422]: ansible-ansible.legacy.command Invoked with _raw_params=podman run -d -p 127.0.0.1:5000:5000 --name podman_registry -v /tmp/lsr_pjrhafco_podman/auth:/auth:Z -e REGISTRY_AUTH=htpasswd -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/registry_cert.crt -e REGISTRY_HTTP_TLS_KEY=/auth/registry_key.pem quay.io/libpod/registry:2.8.2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:28:20 managed-node1 systemd[1]: var-lib-containers-storage-overlay-compat2297044060-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-compat2297044060-merged.mount has successfully entered the 'dead' state. Jan 18 11:28:20 managed-node1 kernel: evm: overlay not supported Jan 18 11:28:20 managed-node1 systemd[1]: var-lib-containers-storage-overlay-metacopy\x2dcheck3987118804-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-metacopy\x2dcheck3987118804-merged.mount has successfully entered the 'dead' state. Jan 18 11:28:20 managed-node1 podman[12423]: 2025-01-18 11:28:20.843076291 -0500 EST m=+0.079374761 system refresh Jan 18 11:28:22 managed-node1 systemd[1]: var-lib-containers-storage-overlay-compat2846827946-lower\x2dmapped.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-compat2846827946-lower\x2dmapped.mount has successfully entered the 'dead' state. 
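Note: the podman run invocation recorded above brings up the test's local registry with TLS (REGISTRY_HTTP_TLS_CERTIFICATE/KEY) and htpasswd authentication, published on 127.0.0.1:5000. A manual spot-check of such a registry could look like the sketch below; these commands are illustrative only, are not part of the recorded run, and the htpasswd credentials are held in the vaulted test variables rather than in this log.

    # Probe the registry over TLS, trusting the CA staged for the test
    # (path taken from the earlier copy task). While unauthenticated, expect
    # HTTP 401 with 'WWW-Authenticate: Basic realm="Registry Realm"'.
    curl -i --cacert /tmp/lsr_pjrhafco_podman/auth/ca.crt https://127.0.0.1:5000/v2/
    # With valid credentials the same endpoint returns HTTP 200:
    # curl -u <user>:<password> --cacert /tmp/lsr_pjrhafco_podman/auth/ca.crt https://127.0.0.1:5000/v2/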
Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.234995452 -0500 EST m=+2.471294120 image pull 0030ba3d620c647159c935ee778991c68ef3e51a274703753b0bc530104ef5e5 quay.io/libpod/registry:2.8.2 Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.255040284 -0500 EST m=+2.491338773 volume create 5b91e845725b9a38c4db654f5a648d0520f98a70531e23c7c4ebc3dd02be25a3 Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.265995799 -0500 EST m=+2.502294290 container create ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 kernel: bridge: filtering via arp/ip/ip6tables is no longer available by default. Update your scripts to load br_netfilter if you need this. Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3121] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/3) Jan 18 11:28:23 managed-node1 (udev-worker)[12509]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:28:23 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:28:23 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3266] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4) Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3298] device (podman0): carrier: link connected Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3299] device (veth0): carrier: link connected Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3324] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3337] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3343] device (podman0): Activation: starting connection 'podman0' (7e61ee2e-4601-477b-80d6-a04deed50d8c) Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3344] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3346] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 (udev-worker)[12430]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3373] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3375] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... 
░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1863. Jan 18 11:28:23 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1863. Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3958] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3961] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3966] device (podman0): Activation: successful, device activated. Jan 18 11:28:23 managed-node1 systemd[1]: Created slice machine.slice - Slice /machine. ░░ Subject: A start job for unit machine.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine.slice has finished successfully. ░░ ░░ The job identifier is 1943. Jan 18 11:28:23 managed-node1 systemd[1]: Started libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope. ░░ Subject: A start job for unit libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully. ░░ ░░ The job identifier is 1942. Jan 18 11:28:23 managed-node1 systemd[1]: Started libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope - libcrun container. ░░ Subject: A start job for unit libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully. ░░ ░░ The job identifier is 1948. Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.608384003 -0500 EST m=+2.844682591 container init ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.611994214 -0500 EST m=+2.848292818 container start ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 systemd[1]: libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has successfully entered the 'dead' state. 
Jan 18 11:28:23 managed-node1 podman[12583]: 2025-01-18 11:28:23.659873262 -0500 EST m=+0.032506167 container died ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:28:23 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:28:23 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.6969] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:28:23 managed-node1 systemd[1]: run-netns-netns\x2d3e2b2dc4\x2d7b8c\x2d05da\x2d804f\x2dd4a3f0f407c9.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d3e2b2dc4\x2d7b8c\x2d05da\x2d804f\x2dd4a3f0f407c9.mount has successfully entered the 'dead' state. Jan 18 11:28:23 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7-userdata-shm.mount has successfully entered the 'dead' state. Jan 18 11:28:23 managed-node1 systemd[1]: var-lib-containers-storage-overlay-13ef4bc2c39973b84a982a72f8a097e0906cc9de618ea45451c5c2a39e57c85d-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-13ef4bc2c39973b84a982a72f8a097e0906cc9de618ea45451c5c2a39e57c85d-merged.mount has successfully entered the 'dead' state. Jan 18 11:28:23 managed-node1 podman[12583]: 2025-01-18 11:28:23.780630912 -0500 EST m=+0.153263822 container cleanup ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 systemd[1]: libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has successfully entered the 'dead' state. Jan 18 11:28:24 managed-node1 python3.12[12734]: ansible-wait_for Invoked with port=5000 host=127.0.0.1 timeout=300 connect_timeout=5 delay=0 active_connection_states=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT'] state=started sleep=1 path=None search_regex=None exclude_hosts=None msg=None Jan 18 11:28:24 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:28:33 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jan 18 11:30:20 managed-node1 systemd[1]: Starting logrotate.service - Rotate log files... ░░ Subject: A start job for unit logrotate.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit logrotate.service has begun execution. ░░ ░░ The job identifier is 1954. Jan 18 11:30:20 managed-node1 systemd[1]: logrotate.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit logrotate.service has successfully entered the 'dead' state. Jan 18 11:30:20 managed-node1 systemd[1]: Finished logrotate.service - Rotate log files. ░░ Subject: A start job for unit logrotate.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit logrotate.service has finished successfully. ░░ ░░ The job identifier is 1954. Jan 18 11:31:46 managed-node1 systemd[4341]: Created slice background.slice - User Background Tasks Slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 14. Jan 18 11:31:46 managed-node1 systemd[4341]: Starting systemd-tmpfiles-clean.service - Cleanup of User's Temporary Files and Directories... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 13. Jan 18 11:31:46 managed-node1 systemd[4341]: Finished systemd-tmpfiles-clean.service - Cleanup of User's Temporary Files and Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 13. 
Jan 18 11:33:26 managed-node1 python3.12[12915]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Jan 18 11:33:27 managed-node1 python3.12[13075]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:27 managed-node1 python3.12[13206]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:29 managed-node1 python3.12[13468]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:33:30 managed-node1 python3.12[13605]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jan 18 11:33:31 managed-node1 python3.12[13737]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:34 managed-node1 python3.12[13870]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:36 managed-node1 python3.12[14003]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:33:37 managed-node1 python3.12[14134]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:33:37 managed-node1 python3.12[14239]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1737218016.9384396-19502-222946066402839/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:33:38 managed-node1 python3.12[14370]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jan 18 11:33:38 managed-node1 systemd[1]: Reload requested from client PID 14371 ('systemctl') (unit session-5.scope)... Jan 18 11:33:38 managed-node1 systemd[1]: Reloading... Jan 18 11:33:38 managed-node1 systemd-ssh-generator[14418]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:33:38 managed-node1 systemd-rc-local-generator[14416]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:33:38 managed-node1 (sd-exec-[14390]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:33:39 managed-node1 systemd[1]: Reloading finished in 198 ms. Jan 18 11:33:39 managed-node1 systemd[1]: Starting fstrim.service - Discard unused blocks on filesystems from /etc/fstab... 
░░ Subject: A start job for unit fstrim.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit fstrim.service has begun execution. ░░ ░░ The job identifier is 2032. Jan 18 11:33:39 managed-node1 systemd[1]: fstrim.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit fstrim.service has successfully entered the 'dead' state. Jan 18 11:33:39 managed-node1 systemd[1]: Finished fstrim.service - Discard unused blocks on filesystems from /etc/fstab. ░░ Subject: A start job for unit fstrim.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit fstrim.service has finished successfully. ░░ ░░ The job identifier is 2032. Jan 18 11:33:39 managed-node1 python3.12[14558]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jan 18 11:33:39 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2110. Jan 18 11:33:40 managed-node1 systemd[1]: var-lib-containers-storage-overlay-4e5b681e4812b861c05696cf9eda4e2b798987d072ef1cbc2f4535e4de1a52d2-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-4e5b681e4812b861c05696cf9eda4e2b798987d072ef1cbc2f4535e4de1a52d2-merged.mount has successfully entered the 'dead' state. Jan 18 11:33:40 managed-node1 podman[14562]: 2025-01-18 11:33:40.052580435 -0500 EST m=+0.295779721 image build 19fab7a77b4482d542bc2a8ec7a82b765cf4ab7fe7d20dc420147eb574bb42a8 Jan 18 11:33:40 managed-node1 systemd[1]: Created slice machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice - cgroup machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice. ░░ Subject: A start job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished successfully. ░░ ░░ The job identifier is 2194. Jan 18 11:33:40 managed-node1 podman[14562]: 2025-01-18 11:33:40.101633199 -0500 EST m=+0.344832376 container create 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14562]: 2025-01-18 11:33:40.107607567 -0500 EST m=+0.350806715 pod create 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14562]: 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 Jan 18 11:33:40 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:40 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:33:40 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1665] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/5) Jan 18 11:33:40 managed-node1 (udev-worker)[14624]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:33:40 managed-node1 (udev-worker)[14625]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1749] device (veth0): carrier: link connected Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1752] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/6) Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1755] device (podman0): carrier: link connected Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2057] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2064] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2072] device (podman0): Activation: starting connection 'podman0' (5734d0f6-22f7-4bd2-8489-7ac9c0a2ae63) Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2076] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2078] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2080] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2083] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 2200. Jan 18 11:33:40 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 2200. 
Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2397] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2401] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2406] device (podman0): Activation: successful, device activated. Jan 18 11:33:40 managed-node1 systemd[1]: Started libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope - libcrun container. ░░ Subject: A start job for unit libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope has finished successfully. ░░ ░░ The job identifier is 2279. Jan 18 11:33:40 managed-node1 podman[14614]: 2025-01-18 11:33:40.264236296 -0500 EST m=+0.133852961 container init 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14614]: 2025-01-18 11:33:40.267313575 -0500 EST m=+0.136930241 container start 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 systemd[1]: libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope has successfully entered the 'dead' state. Jan 18 11:33:40 managed-node1 podman[14614]: 2025-01-18 11:33:40.273314347 -0500 EST m=+0.142930998 pod start 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14614]: quadlet-pod Jan 18 11:33:40 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 2110. 
Jan 18 11:33:40 managed-node1 podman[14670]: 2025-01-18 11:33:40.309469316 -0500 EST m=+0.030173893 container died 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:40 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:33:40 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.3526] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:33:40 managed-node1 podman[14670]: 2025-01-18 11:33:40.407060902 -0500 EST m=+0.127765289 container cleanup 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14670]: 2025-01-18 11:33:40.408230812 -0500 EST m=+0.128935235 pod stop 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 systemd[1]: Removed slice machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice - cgroup machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice. ░░ Subject: A stop job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished. ░░ ░░ The job identifier is 2286 and the job result is done. Jan 18 11:33:40 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 1. Jan 18 11:33:40 managed-node1 podman[14714]: 2025-01-18 11:33:40.492903497 -0500 EST m=+0.049470406 container remove 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14714]: 2025-01-18 11:33:40.503091083 -0500 EST m=+0.059657964 pod remove 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14714]: 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 Jan 18 11:33:40 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. 
Jan 18 11:33:40 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 1.
░░ Subject: Automatic restarting of a unit has been scheduled
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for
░░ the configured Restart= setting for the unit.
Jan 18 11:33:40 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service...
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has begun execution.
░░
░░ The job identifier is 2288.
Jan 18 11:33:40 managed-node1 systemd[1]: Created slice machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice - cgroup machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice.
░░ Subject: A start job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished successfully.
░░
░░ The job identifier is 2372.
Jan 18 11:33:40 managed-node1 podman[14724]: 2025-01-18 11:33:40.867584006 -0500 EST m=+0.076424916 container create 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)
Jan 18 11:33:40 managed-node1 podman[14724]: 2025-01-18 11:33:40.873786077 -0500 EST m=+0.082626984 pod create fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod)
Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14724]: fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692
Jan 18 11:33:40 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9275] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/7)
Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:40 managed-node1 kernel: veth0: entered allmulticast mode
Jan 18 11:33:40 managed-node1 kernel: veth0: entered promiscuous mode
Jan 18 11:33:40 managed-node1 (udev-worker)[14645]: Network interface NamePolicy= disabled on kernel command line.
Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9424] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/8)
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9473] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9479] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9485] device (podman0): Activation: starting connection 'podman0' (f19726fd-c620-48a6-81ec-88aca5176477)
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9496] device (veth0): carrier: link connected
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9501] device (podman0): carrier: link connected
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9504] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9510] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9513] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9517] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9564] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9615] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Jan 18 11:33:40 managed-node1 NetworkManager[711]: <info>  [1737218020.9620] device (podman0): Activation: successful, device activated.
Jan 18 11:33:41 managed-node1 systemd[1]: Started libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope - libcrun container.
░░ Subject: A start job for unit libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope has finished successfully.
░░
░░ The job identifier is 2378.
Jan 18 11:33:41 managed-node1 podman[14733]: 2025-01-18 11:33:41.024075225 -0500 EST m=+0.130462979 container init 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 podman[14733]: 2025-01-18 11:33:41.026932567 -0500 EST m=+0.133320364 container start 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)
Jan 18 11:33:41 managed-node1 systemd[1]: libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope has successfully entered the 'dead' state.
Jan 18 11:33:41 managed-node1 conmon[14759]: conmon 71d159e1b2eba1445354 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice/libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope/container/memory.events
Jan 18 11:33:41 managed-node1 podman[14733]: 2025-01-18 11:33:41.033064726 -0500 EST m=+0.139452443 pod start fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod)
Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14733]: quadlet-pod
Jan 18 11:33:41 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service.
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully.
░░
░░ The job identifier is 2288.
Jan 18 11:33:41 managed-node1 podman[14763]: 2025-01-18 11:33:41.067125334 -0500 EST m=+0.024761993 container died 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left allmulticast mode
Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left promiscuous mode
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.1029] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Jan 18 11:33:41 managed-node1 podman[14763]: 2025-01-18 11:33:41.150359966 -0500 EST m=+0.107996812 container cleanup 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 podman[14763]: 2025-01-18 11:33:41.151414399 -0500 EST m=+0.109051037 pod stop fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod)
Jan 18 11:33:41 managed-node1 systemd[1]: Removed slice machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice - cgroup machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice.
░░ Subject: A stop job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished.
░░
░░ The job identifier is 2385 and the job result is done.
Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE
░░ Subject: Unit process exited
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited.
░░
░░ The process' exit code is 'exited' and its exit status is 1.
Jan 18 11:33:41 managed-node1 podman[14787]: 2025-01-18 11:33:41.2547741 -0500 EST m=+0.062974793 container remove 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 podman[14787]: 2025-01-18 11:33:41.268902616 -0500 EST m=+0.077103294 pod remove fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod)
Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14787]: fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692
Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'.
░░ Subject: Unit failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'.
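The service name follows Quadlet's naming rule: a pod unit file <name>.pod generates <name>-pod.service, so quadlet-pod-pod-pod.service implies a source file named quadlet-pod-pod.pod, while the pod itself is named quadlet-pod in the podman entries above. A hypothetical reconstruction of such a file (illustrative only; the test's actual unit content does not appear in this log):

# Sketch of the Quadlet pod file implied by the unit name above (assumed content)
cat > /etc/containers/systemd/quadlet-pod-pod.pod <<'EOF'
[Pod]
# Assumed: overrides the default generated pod name so the pod appears
# as "quadlet-pod" in the podman events.
PodName=quadlet-pod
EOF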
Jan 18 11:33:41 managed-node1 python3.12[14906]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 2.
░░ Subject: Automatic restarting of a unit has been scheduled
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for
░░ the configured Restart= setting for the unit.
Jan 18 11:33:41 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service...
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has begun execution.
░░
░░ The job identifier is 2387.
Jan 18 11:33:41 managed-node1 systemd[1]: Created slice machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice - cgroup machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice.
░░ Subject: A start job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished successfully.
░░
░░ The job identifier is 2471.
Jan 18 11:33:41 managed-node1 podman[14930]: 2025-01-18 11:33:41.67679635 -0500 EST m=+0.071343444 container create cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 podman[14930]: 2025-01-18 11:33:41.683125506 -0500 EST m=+0.077672604 pod create 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod)
Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14930]: 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7309] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/9)
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:41 managed-node1 kernel: veth0: entered allmulticast mode
Jan 18 11:33:41 managed-node1 kernel: veth0: entered promiscuous mode
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7401] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/10)
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7413] device (veth0): carrier: link connected
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7415] device (podman0): carrier: link connected
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7619] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7626] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7634] device (podman0): Activation: starting connection 'podman0' (578d14fd-34f6-4f03-9983-1048994dadcb)
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7639] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7642] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7645] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7649] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7709] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7710] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.7716] device (podman0): Activation: successful, device activated.
Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de-userdata-shm.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay\x2dcontainers-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de-userdata-shm.mount has successfully entered the 'dead' state.
Jan 18 11:33:41 managed-node1 systemd[1]: Started libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope - libcrun container.
░░ Subject: A start job for unit libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope has finished successfully.
░░
░░ The job identifier is 2477.
Jan 18 11:33:41 managed-node1 podman[14938]: 2025-01-18 11:33:41.833984995 -0500 EST m=+0.133229395 container init cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 podman[14938]: 2025-01-18 11:33:41.836860526 -0500 EST m=+0.136105046 container start cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 systemd[1]: libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope has successfully entered the 'dead' state.
Jan 18 11:33:41 managed-node1 podman[14938]: 2025-01-18 11:33:41.843610464 -0500 EST m=+0.142854806 pod start 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod)
Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14938]: quadlet-pod
Jan 18 11:33:41 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service.
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully.
░░
░░ The job identifier is 2387.
Jan 18 11:33:41 managed-node1 podman[14968]: 2025-01-18 11:33:41.875395285 -0500 EST m=+0.023750684 container died cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left allmulticast mode
Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left promiscuous mode
Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:41 managed-node1 NetworkManager[711]: <info>  [1737218021.9070] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Jan 18 11:33:41 managed-node1 systemd[1]: run-netns-netns\x2d6264de46\x2d32a9\x2d432c\x2d487b\x2d4419c043634d.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit run-netns-netns\x2d6264de46\x2d32a9\x2d432c\x2d487b\x2d4419c043634d.mount has successfully entered the 'dead' state.
Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2-userdata-shm.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay\x2dcontainers-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2-userdata-shm.mount has successfully entered the 'dead' state.
Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay-c1e9162aeff1201990e8d156c1a1e46da6d3b4693c3f06eef70039638e5e918c-merged.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay-c1e9162aeff1201990e8d156c1a1e46da6d3b4693c3f06eef70039638e5e918c-merged.mount has successfully entered the 'dead' state.
Jan 18 11:33:41 managed-node1 podman[14968]: 2025-01-18 11:33:41.953694779 -0500 EST m=+0.102050647 container cleanup cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:41 managed-node1 podman[14968]: 2025-01-18 11:33:41.955418494 -0500 EST m=+0.103773946 pod stop 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod)
Jan 18 11:33:41 managed-node1 systemd[1]: Removed slice machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice - cgroup machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice.
░░ Subject: A stop job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished.
░░
░░ The job identifier is 2484 and the job result is done.
Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE
░░ Subject: Unit process exited
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited.
░░
░░ The process' exit code is 'exited' and its exit status is 1.
Jan 18 11:33:42 managed-node1 podman[14991]: 2025-01-18 11:33:42.044406496 -0500 EST m=+0.050948258 container remove cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)
Jan 18 11:33:42 managed-node1 podman[14991]: 2025-01-18 11:33:42.056292467 -0500 EST m=+0.062834203 pod remove 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod)
Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[14991]: 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746
Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'.
░░ Subject: Unit failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'.
Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 3.
░░ Subject: Automatic restarting of a unit has been scheduled
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for
░░ the configured Restart= setting for the unit.
Jan 18 11:33:42 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service...
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has begun execution.
░░
░░ The job identifier is 2486.
Jan 18 11:33:42 managed-node1 systemd[1]: Created slice machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice - cgroup machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice.
░░ Subject: A start job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished successfully.
░░
░░ The job identifier is 2570.
Jan 18 11:33:42 managed-node1 podman[15002]: 2025-01-18 11:33:42.428345618 -0500 EST m=+0.080049535 container create bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:42 managed-node1 podman[15002]: 2025-01-18 11:33:42.434625701 -0500 EST m=+0.086329508 pod create e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod)
Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[15002]: e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c
Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:42 managed-node1 kernel: veth0: entered allmulticast mode
Jan 18 11:33:42 managed-node1 kernel: veth0: entered promiscuous mode
Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.4932] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/11)
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.4974] device (podman0): carrier: link connected
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.4984] device (veth0): carrier: link connected
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.4989] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/12)
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5085] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5093] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5100] device (podman0): Activation: starting connection 'podman0' (f8555d8f-9dd1-4a6b-8a25-8e0e9826b378)
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5103] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5105] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5114] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5125] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5155] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5163] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.5192] device (podman0): Activation: successful, device activated.
Jan 18 11:33:42 managed-node1 systemd[1]: Started libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope - libcrun container.
░░ Subject: A start job for unit libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope has finished successfully.
░░
░░ The job identifier is 2576.
Jan 18 11:33:42 managed-node1 podman[15010]: 2025-01-18 11:33:42.577700529 -0500 EST m=+0.123417871 container init bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:42 managed-node1 podman[15010]: 2025-01-18 11:33:42.58088775 -0500 EST m=+0.126605192 container start bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:42 managed-node1 systemd[1]: libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope has successfully entered the 'dead' state.
Jan 18 11:33:42 managed-node1 conmon[15038]: conmon bd7e15cdccba04722af7 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice/libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope/container/memory.events
Jan 18 11:33:42 managed-node1 podman[15010]: 2025-01-18 11:33:42.587145651 -0500 EST m=+0.132862945 pod start e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod)
Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[15010]: quadlet-pod
Jan 18 11:33:42 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service.
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully.
░░
░░ The job identifier is 2486.
Jan 18 11:33:42 managed-node1 podman[15043]: 2025-01-18 11:33:42.620274963 -0500 EST m=+0.023752615 container died bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:42 managed-node1 kernel: veth0 (unregistering): left allmulticast mode
Jan 18 11:33:42 managed-node1 kernel: veth0 (unregistering): left promiscuous mode
Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:42 managed-node1 NetworkManager[711]: <info>  [1737218022.6600] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Jan 18 11:33:42 managed-node1 podman[15043]: 2025-01-18 11:33:42.703001322 -0500 EST m=+0.106478971 container cleanup bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:42 managed-node1 podman[15043]: 2025-01-18 11:33:42.704842889 -0500 EST m=+0.108320516 pod stop e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod)
Jan 18 11:33:42 managed-node1 systemd[1]: Removed slice machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice - cgroup machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice.
░░ Subject: A stop job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished.
░░
░░ The job identifier is 2583 and the job result is done.
Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE
░░ Subject: Unit process exited
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited.
░░
░░ The process' exit code is 'exited' and its exit status is 1.
Jan 18 11:33:42 managed-node1 podman[15066]: 2025-01-18 11:33:42.788883839 -0500 EST m=+0.051451334 container remove bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:42 managed-node1 podman[15066]: 2025-01-18 11:33:42.799225016 -0500 EST m=+0.061792504 pod remove e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod)
Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[15066]: e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c
Jan 18 11:33:42 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'.
░░ Subject: Unit failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'.
Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 4.
░░ Subject: Automatic restarting of a unit has been scheduled
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for
░░ the configured Restart= setting for the unit.
Jan 18 11:33:43 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service...
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has begun execution.
░░
░░ The job identifier is 2585.
Jan 18 11:33:43 managed-node1 systemd[1]: Created slice machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice - cgroup machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice.
░░ Subject: A start job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished successfully.
░░
░░ The job identifier is 2669.
Jan 18 11:33:43 managed-node1 podman[15123]: 2025-01-18 11:33:43.177068791 -0500 EST m=+0.077884436 container create 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)
Jan 18 11:33:43 managed-node1 podman[15123]: 2025-01-18 11:33:43.183286715 -0500 EST m=+0.084102413 pod create f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod)
Jan 18 11:33:43 managed-node1 quadlet-pod-pod-pod[15123]: f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46
Jan 18 11:33:43 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
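By this point the restart counter has reached 4. systemd tracks the loop as unit properties, so the failure pattern can be quantified without scraping the journal (standard systemd properties, not specific to this test):

# NRestarts counts automatic restarts; Result and ExecMainStatus confirm the exit status 1 seen above
systemctl show quadlet-pod-pod-pod.service -p NRestarts,Result,ExecMainStatus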
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2500] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/13)
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:43 managed-node1 kernel: veth0: entered allmulticast mode
Jan 18 11:33:43 managed-node1 kernel: veth0: entered promiscuous mode
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2670] device (podman0): carrier: link connected
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2675] device (veth0): carrier: link connected
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2719] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/14)
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2880] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2892] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2899] device (podman0): Activation: starting connection 'podman0' (cde6d46a-6303-4ae2-a932-44130c439952)
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2902] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2904] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2905] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.2909] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.3094] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.3096] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.3109] device (podman0): Activation: successful, device activated.
Jan 18 11:33:43 managed-node1 systemd[1]: Started libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope - libcrun container.
░░ Subject: A start job for unit libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope has finished successfully.
░░
░░ The job identifier is 2675.
Jan 18 11:33:43 managed-node1 podman[15132]: 2025-01-18 11:33:43.363268029 -0500 EST m=+0.157799375 container init 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:43 managed-node1 podman[15132]: 2025-01-18 11:33:43.368080332 -0500 EST m=+0.162611411 container start 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:43 managed-node1 conmon[15209]: conmon 1ea9ccb136e01b3c0ce0 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice/libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope/container/memory.events
Jan 18 11:33:43 managed-node1 systemd[1]: libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope has successfully entered the 'dead' state.
Jan 18 11:33:43 managed-node1 podman[15132]: 2025-01-18 11:33:43.378339606 -0500 EST m=+0.172870674 pod start f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod)
Jan 18 11:33:43 managed-node1 quadlet-pod-pod-pod[15132]: quadlet-pod
Jan 18 11:33:43 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service.
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully.
░░
░░ The job identifier is 2585.
Jan 18 11:33:43 managed-node1 podman[15227]: 2025-01-18 11:33:43.428553199 -0500 EST m=+0.039068776 container died 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:43 managed-node1 kernel: veth0 (unregistering): left allmulticast mode
Jan 18 11:33:43 managed-node1 kernel: veth0 (unregistering): left promiscuous mode
Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Jan 18 11:33:43 managed-node1 NetworkManager[711]: <info>  [1737218023.5174] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Jan 18 11:33:43 managed-node1 podman[15227]: 2025-01-18 11:33:43.574297477 -0500 EST m=+0.184812888 container cleanup 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0)
Jan 18 11:33:43 managed-node1 podman[15227]: 2025-01-18 11:33:43.576291098 -0500 EST m=+0.186806507 pod stop f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod)
Jan 18 11:33:43 managed-node1 systemd[1]: Removed slice machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice - cgroup machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice.
░░ Subject: A stop job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished.
░░
░░ The job identifier is 2682 and the job result is done.
Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE
░░ Subject: Unit process exited
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited.
░░
░░ The process' exit code is 'exited' and its exit status is 1.
Jan 18 11:33:43 managed-node1 podman[15255]: 2025-01-18 11:33:43.694967472 -0500 EST m=+0.072643489 container remove 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)
Jan 18 11:33:43 managed-node1 podman[15255]: 2025-01-18 11:33:43.710883834 -0500 EST m=+0.088559423 pod remove f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod)
Jan 18 11:33:43 managed-node1 quadlet-pod-pod-pod[15255]: f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46
Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'.
░░ Subject: Unit failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'.
Jan 18 11:33:43 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 5.
░░ Subject: Automatic restarting of a unit has been scheduled
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for
░░ the configured Restart= setting for the unit.
Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Start request repeated too quickly.
Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'.
░░ Subject: Unit failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'.
Jan 18 11:33:43 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service.
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure.
░░
░░ The job identifier is 2684 and the job result is failed.
Jan 18 11:33:47 managed-node1 podman[15262]: 2025-01-18 11:33:47.002680632 -0500 EST m=+3.361550536 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610
Jan 18 11:33:47 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:47 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
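After the fifth failure within the rate-limit window, systemd gives up: "Start request repeated too quickly" means the unit hit its start limit, which is controlled by StartLimitIntervalSec= and StartLimitBurst= in the [Unit] section. Generic systemd mechanics for inspecting and clearing that state (commands are standard systemctl, not taken from the test itself):

# Show the rate-limit settings the unit is running with
systemctl show quadlet-pod-pod-pod.service -p StartLimitBurst,StartLimitIntervalUSec
# A manual retry first needs the failed state and restart counter cleared
systemctl reset-failed quadlet-pod-pod-pod.service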
Jan 18 11:33:47 managed-node1 python3.12[15431]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 18 11:33:47 managed-node1 python3.12[15562]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Jan 18 11:33:48 managed-node1 python3.12[15667]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1737218027.6281447-19906-14489979928832/.source.container dest=/etc/containers/systemd/quadlet-pod-container.container owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 18 11:33:48 managed-node1 python3.12[15798]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None
Jan 18 11:33:48 managed-node1 systemd[1]: Reload requested from client PID 15799 ('systemctl') (unit session-5.scope)...
Jan 18 11:33:48 managed-node1 systemd[1]: Reloading...
Jan 18 11:33:48 managed-node1 systemd-rc-local-generator[15844]: /etc/rc.d/rc.local is not marked executable, skipping.
Jan 18 11:33:48 managed-node1 systemd-ssh-generator[15846]: Failed to query local AF_VSOCK CID: Permission denied
Jan 18 11:33:48 managed-node1 (sd-exec-strv)[15819]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1.
Jan 18 11:33:49 managed-node1 systemd[1]: Reloading finished in 206 ms.
Jan 18 11:33:49 managed-node1 python3.12[15984]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None
Jan 18 11:33:49 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Start request repeated too quickly.
Jan 18 11:33:49 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'.
░░ Subject: Unit failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'.
Jan 18 11:33:49 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service.
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure.
░░
░░ The job identifier is 2769 and the job result is failed.
Jan 18 11:33:49 managed-node1 systemd[1]: Dependency failed for quadlet-pod-container.service.
░░ Subject: A start job for unit quadlet-pod-container.service has failed
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit quadlet-pod-container.service has finished with a failure.
░░
░░ The job identifier is 2768 and the job result is dependency.
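At this point the role has installed /etc/containers/systemd/quadlet-pod-container.container (rendered from a systemd.j2 template, per the copy task above), and the generated quadlet-pod-container.service fails with result 'dependency' because the pod unit it joins can no longer be started. A minimal sketch of what such a .container file typically contains (illustrative; the file's actual contents are not logged here, and only the image pulled earlier, quay.io/libpod/testimage:20210610, is known from the log):

# Sketch of a .container quadlet joining the pod (assumed content)
cat > /etc/containers/systemd/quadlet-pod-container.container <<'EOF'
[Container]
Image=quay.io/libpod/testimage:20210610
# Assumed: referencing the .pod quadlet is what creates the dependency
# on quadlet-pod-pod-pod.service seen in the journal above.
Pod=quadlet-pod-pod.pod
EOF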
Jan 18 11:33:49 managed-node1 systemd[1]: quadlet-pod-container.service: Job quadlet-pod-container.service/start failed with result 'dependency'.
Jan 18 11:33:50 managed-node1 python3.12[16118]: ansible-ansible.legacy.command Invoked with _raw_params=set -x
set -o pipefail
exec 1>&2
#podman volume rm --all
#podman network prune -f
podman volume ls
podman network ls
podman secret ls
podman container ls
podman pod ls
podman images
systemctl list-units | grep quadlet
systemctl list-unit-files | grep quadlet
ls -alrtF /etc/containers/systemd
/usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log
_uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Jan 18 11:33:50 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:50 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:51 managed-node1 python3.12[16304]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Jan 18 11:33:51 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.
Jan 18 11:33:51 managed-node1 python3.12[16436]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None

TASK [Cleanup user] ************************************************************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:159
Saturday 18 January 2025 11:33:51 -0500 (0:00:00.476) 0:00:26.566 ******
included: fedora.linux_system_roles.podman for managed-node1

TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3
Saturday 18 January 2025 11:33:51 -0500 (0:00:00.091) 0:00:26.658 ******
included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] ****
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3
Saturday 18 January 2025 11:33:51 -0500 (0:00:00.084) 0:00:26.742 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Check if system is ostree] ************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11
Saturday 18 January 2025 11:33:51 -0500 (0:00:00.036) 0:00:26.779 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_is_ostree is defined",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set flag to indicate system is ostree] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16
Saturday 18 January 2025 11:33:51 -0500 (0:00:00.033) 0:00:26.813 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_is_ostree is defined",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23
Saturday 18 January 2025 11:33:51 -0500 (0:00:00.037) 0:00:26.850 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_is_transactional is defined",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28
Saturday 18 January 2025 11:33:51 -0500 (0:00:00.046) 0:00:26.897 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_is_transactional is defined",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32
Saturday 18 January 2025 11:33:52 -0500 (0:00:00.037) 0:00:26.934 ******
ok: [managed-node1] => (item=RedHat.yml) => {
    "ansible_facts": {
        "__podman_packages": [
            "podman",
            "shadow-utils-subid"
        ]
    },
    "ansible_included_var_files": [
        "/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml"
    ],
    "ansible_loop_var": "item",
    "changed": false,
    "item": "RedHat.yml"
}
skipping: [managed-node1] => (item=CentOS.yml) => {
    "ansible_loop_var": "item",
    "changed": false,
    "false_condition": "__vars_file is file",
    "item": "CentOS.yml",
    "skip_reason": "Conditional result was False"
}
ok: [managed-node1] => (item=CentOS_10.yml) => {
    "ansible_facts": {
        "__podman_packages": [
            "iptables-nft",
            "podman",
            "shadow-utils-subid"
        ]
    },
    "ansible_included_var_files": [
        "/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml"
    ],
    "ansible_loop_var": "item",
    "changed": false,
    "item": "CentOS_10.yml"
}
ok: [managed-node1] => (item=CentOS_10.yml) => {
    "ansible_facts": {
        "__podman_packages": [
            "iptables-nft",
            "podman",
            "shadow-utils-subid"
        ]
    },
    "ansible_included_var_files": [
        "/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml"
    ],
    "ansible_loop_var": "item",
    "changed": false,
    "item": "CentOS_10.yml"
}

TASK [fedora.linux_system_roles.podman : Gather the package facts] *************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
Saturday 18 January 2025 11:33:52 -0500 (0:00:00.075) 0:00:27.010 ******
ok: [managed-node1] => {
    "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result",
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Enable copr if requested] *************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10
Saturday 18 January 2025 11:33:52 -0500 (0:00:00.892) 0:00:27.902 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "podman_use_copr | d(false)",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.036) 0:00:27.939 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "(__podman_packages | difference(ansible_facts.packages))",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.030) 0:00:28.007 ******
skipping: [managed-node1] => {
    "false_condition": "__podman_is_transactional | d(false)"
}

TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.033) 0:00:28.041 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_is_transactional | d(false)",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.033) 0:00:28.075 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_is_transactional | d(false)",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Get podman version] *******************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.034) 0:00:28.075 ******
ok: [managed-node1] => {
    "changed": false,
    "cmd": [
        "podman",
        "--version"
    ],
    "delta": "0:00:00.025654",
    "end": "2025-01-18 11:33:53.512310",
    "rc": 0,
    "start": "2025-01-18 11:33:53.486656"
}

STDOUT:

podman version 5.3.1

TASK [fedora.linux_system_roles.podman : Set podman version] *******************
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.419) 0:00:28.494 ******
ok: [managed-node1] => {
    "ansible_facts": {
        "podman_version": "5.3.1"
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.033) 0:00:28.528 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "podman_version is version(\"4.2\", \"<\")",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.027) 0:00:28.555 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "podman_version is version(\"4.4\", \"<\")",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.046) 0:00:28.601 ******
META: end_host conditional evaluated to False, continuing execution for managed-node1
skipping: [managed-node1] => {
    "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1"
}

MSG:

end_host conditional evaluated to false, continuing execution for managed-node1

TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.093) 0:00:28.695 ******
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "podman_version is version(\"5.0\", \"<\")",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] ***
task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96
Saturday 18 January 2025 11:33:53 -0500 (0:00:00.083) 0:00:28.779 ******
META: end_host conditional evaluated to False, continuing execution for managed-node1
skipping: [managed-node1] => {
    "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1"
}

MSG:

end_host conditional evaluated to false, continuing execution for managed-node1

TASK [fedora.linux_system_roles.podman : Check user and group information] *****
task path:
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109 Saturday 18 January 2025 11:33:53 -0500 (0:00:00.085) 0:00:28.865 ****** included: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Saturday 18 January 2025 11:33:54 -0500 (0:00:00.096) 0:00:28.961 ****** ok: [managed-node1] => { "ansible_facts": { "getent_passwd": { "user_quadlet_pod": null } }, "changed": false } MSG: One or more supplied key could not be found in the database. TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Saturday 18 January 2025 11:33:54 -0500 (0:00:00.435) 0:00:29.397 ****** fatal: [managed-node1]: FAILED! => { "changed": false } MSG: The given podman user [user_quadlet_pod] does not exist - cannot continue TASK [Dump journal] ************************************************************ task path: /tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:194 Saturday 18 January 2025 11:33:54 -0500 (0:00:00.044) 0:00:29.442 ****** fatal: [managed-node1]: FAILED! => { "changed": false, "cmd": [ "journalctl", "-ex" ], "delta": "0:00:00.027122", "end": "2025-01-18 11:33:54.872684", "failed_when_result": true, "rc": 0, "start": "2025-01-18 11:33:54.845562" } STDOUT: Jan 18 11:23:40 localhost chronyd[673]: Loaded seccomp filter (level 2) Jan 18 11:23:40 localhost systemd[1]: Started chronyd.service - NTP client/server. ░░ Subject: A start job for unit chronyd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit chronyd.service has finished successfully. ░░ ░░ The job identifier is 226. Jan 18 11:23:40 localhost rngd[660]: Disabling 7: PKCS11 Entropy generator (pkcs11) Jan 18 11:23:40 localhost rngd[660]: Disabling 5: NIST Network Entropy Beacon (nist) Jan 18 11:23:40 localhost rngd[660]: Disabling 9: Qrypt quantum entropy beacon (qrypt) Jan 18 11:23:40 localhost rngd[660]: Disabling 10: Named pipe entropy input (namedpipe) Jan 18 11:23:40 localhost rngd[660]: Initializing available sources Jan 18 11:23:40 localhost rngd[660]: [hwrng ]: Initialization Failed Jan 18 11:23:40 localhost rngd[660]: [rdrand]: Enabling RDRAND rng support Jan 18 11:23:40 localhost rngd[660]: [rdrand]: Initialized Jan 18 11:23:40 localhost rngd[660]: [jitter]: JITTER timeout set to 5 sec Jan 18 11:23:40 localhost rngd[660]: [jitter]: Initializing AES buffer Jan 18 11:23:43 localhost cloud-init[680]: Cloud-init v. 24.1.4-21.el10 running 'init-local' at Sat, 18 Jan 2025 16:23:43 +0000. Up 17.00 seconds. Jan 18 11:23:43 localhost dhcpcd[682]: dhcpcd-10.0.6 starting Jan 18 11:23:44 localhost kernel: 8021q: 802.1Q VLAN Support v1.8 Jan 18 11:23:44 localhost systemd[1]: Listening on systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch. ░░ Subject: A start job for unit systemd-rfkill.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-rfkill.socket has finished successfully. ░░ ░░ The job identifier is 309. 
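Stepping back from the journal dump for a moment, to the fatal a few lines above: the role refuses to manage rootless units for an account that does not exist, using a getent lookup whose "could not be found in the database" message is visible in the task output. A minimal sketch of that guard follows; ansible.builtin.getent and the getent_passwd fact are real, while the variable name __podman_user is an assumption.

- name: Get user information
  ansible.builtin.getent:
    database: passwd
    key: "{{ __podman_user }}"
    fail_key: false  # record a missing key as a null entry instead of failing this task

- name: Fail if user does not exist
  ansible.builtin.fail:
    msg: The given podman user [{{ __podman_user }}] does not exist - cannot continue
  when: not ansible_facts.getent_passwd[__podman_user]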
Jan 18 11:23:44 localhost kernel: cfg80211: Loading compiled-in X.509 certificates for regulatory database Jan 18 11:23:44 localhost kernel: Loaded X.509 cert 'sforshee: 00b28ddf47aef9cea7' Jan 18 11:23:44 localhost kernel: Loaded X.509 cert 'wens: 61c038651aabdcf94bd0ac7ff06c7248db18c600' Jan 18 11:23:44 localhost kernel: platform regulatory.0: Direct firmware load for regulatory.db failed with error -2 Jan 18 11:23:44 localhost kernel: cfg80211: failed to load regulatory.db Jan 18 11:23:44 localhost dhcpcd[685]: DUID 00:01:00:01:2f:1e:92:10:0a:ff:d9:eb:f1:55 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: IAID d9:eb:f1:55 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: soliciting a DHCP lease Jan 18 11:23:44 localhost dhcpcd[685]: eth0: offered 10.31.12.161 from 10.31.12.1 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: leased 10.31.12.161 for 3600 seconds Jan 18 11:23:44 localhost dhcpcd[685]: eth0: adding route to 10.31.12.0/22 Jan 18 11:23:44 localhost dhcpcd[685]: eth0: adding default route via 10.31.12.1 Jan 18 11:23:44 localhost dhcpcd[685]: control command: /usr/sbin/dhcpcd --dumplease --ipv4only eth0 Jan 18 11:23:45 localhost rngd[660]: [jitter]: Unable to obtain AES key, disabling JITTER source Jan 18 11:23:45 localhost rngd[660]: [jitter]: Initialization Failed Jan 18 11:23:45 localhost rngd[660]: Process privileges have been dropped to 2:2 Jan 18 11:23:45 localhost systemd[1]: Starting systemd-hostnamed.service - Hostname Service... ░░ Subject: A start job for unit systemd-hostnamed.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has begun execution. ░░ ░░ The job identifier is 318. Jan 18 11:23:45 localhost systemd[1]: Started systemd-hostnamed.service - Hostname Service. ░░ Subject: A start job for unit systemd-hostnamed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has finished successfully. ░░ ░░ The job identifier is 318. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-hostnamed[704]: Hostname set to (static) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init-local.service - Initial cloud-init job (pre-networking). ░░ Subject: A start job for unit cloud-init-local.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init-local.service has finished successfully. ░░ ░░ The job identifier is 271. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target network-pre.target - Preparation for Network. ░░ Subject: A start job for unit network-pre.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network-pre.target has finished successfully. ░░ ░░ The job identifier is 153. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager.service - Network Manager... ░░ Subject: A start job for unit NetworkManager.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager.service has begun execution. ░░ ░░ The job identifier is 218. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4761] NetworkManager (version 1.51.5-1.el10) is starting... 
(boot:a3460bff-606e-4f55-935e-5075ab026950) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4763] Read config: /etc/NetworkManager/NetworkManager.conf, /etc/NetworkManager/conf.d/30-cloud-init-ip6-addr-gen-mode.conf Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4890] manager[0x55800d0bdac0]: monitoring kernel firmware directory '/lib/firmware'. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4924] hostname: hostname: using hostnamed Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4924] hostname: static hostname changed from (none) to "ip-10-31-12-161.us-east-1.aws.redhat.com" Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4928] dns-mgr: init: dns=default,systemd-resolved rc-manager=symlink (auto) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4932] manager[0x55800d0bdac0]: rfkill: Wi-Fi hardware radio set enabled Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4932] manager[0x55800d0bdac0]: rfkill: WWAN hardware radio set enabled Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4991] manager: rfkill: Wi-Fi enabled by radio killswitch; enabled by state file Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4992] manager: rfkill: WWAN enabled by radio killswitch; enabled by state file Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.4992] manager: Networking is enabled by state file Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5008] settings: Loaded settings plugin: keyfile (internal) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5058] dhcp: init: Using DHCP client 'internal' Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5061] manager: (lo): new Loopback device (/org/freedesktop/NetworkManager/Devices/1) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5076] device (lo): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 396. 
Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5105] device (lo): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5122] device (lo): Activation: starting connection 'lo' (b94848ee-129e-4cfa-9ba4-69218c56ce3f) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5132] manager: (eth0): new Ethernet device (/org/freedesktop/NetworkManager/Devices/2) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5140] device (eth0): state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager.service - Network Manager. ░░ Subject: A start job for unit NetworkManager.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager.service has finished successfully. ░░ ░░ The job identifier is 218. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5155] bus-manager: acquired D-Bus service "org.freedesktop.NetworkManager" Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5159] device (lo): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5171] device (lo): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target network.target - Network. ░░ Subject: A start job for unit network.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network.target has finished successfully. ░░ ░░ The job identifier is 220. 
Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5178] device (lo): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5181] device (eth0): carrier: link connected Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5190] device (lo): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5195] device (eth0): state change: unavailable -> disconnected (reason 'carrier-changed', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5206] policy: auto-activating connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5211] device (eth0): Activation: starting connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5212] device (eth0): state change: disconnected -> prepare (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5214] manager: NetworkManager state is now CONNECTING Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5215] device (eth0): state change: prepare -> config (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5224] device (eth0): state change: config -> ip-config (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5237] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds) Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-wait-online.service - Network Manager Wait Online... ░░ Subject: A start job for unit NetworkManager-wait-online.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-wait-online.service has begun execution. ░░ ░░ The job identifier is 217. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.5276] dhcp4 (eth0): state changed new lease, address=10.31.12.161, acd pending Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting gssproxy.service - GSSAPI Proxy Daemon... ░░ Subject: A start job for unit gssproxy.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit gssproxy.service has begun execution. ░░ ░░ The job identifier is 248. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7058] dhcp4 (eth0): state changed new lease, address=10.31.12.161 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7069] policy: set 'cloud-init eth0' (eth0) as default for IPv4 routing and DNS Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7178] device (eth0): state change: ip-config -> ip-check (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started gssproxy.service - GSSAPI Proxy Daemon. 
░░ Subject: A start job for unit gssproxy.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit gssproxy.service has finished successfully. ░░ ░░ The job identifier is 248. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: rpc-gssd.service - RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab). ░░ Subject: A start job for unit rpc-gssd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-gssd.service has finished successfully. ░░ ░░ The job identifier is 249. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target nfs-client.target - NFS client services. ░░ Subject: A start job for unit nfs-client.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit nfs-client.target has finished successfully. ░░ ░░ The job identifier is 244. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs-pre.target - Preparation for Remote File Systems. ░░ Subject: A start job for unit remote-fs-pre.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-fs-pre.target has finished successfully. ░░ ░░ The job identifier is 246. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target remote-cryptsetup.target - Remote Encrypted Volumes. ░░ Subject: A start job for unit remote-cryptsetup.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-cryptsetup.target has finished successfully. ░░ ░░ The job identifier is 273. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs.target - Remote File Systems. ░░ Subject: A start job for unit remote-fs.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-fs.target has finished successfully. ░░ ░░ The job identifier is 243. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: systemd-pcrphase.service - TPM PCR Barrier (User) was skipped because of an unmet condition check (ConditionSecurity=measured-uki). ░░ Subject: A start job for unit systemd-pcrphase.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-pcrphase.service has finished successfully. ░░ ░░ The job identifier is 187. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 396. 
Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7494] device (lo): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7499] device (lo): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7513] device (lo): Activation: successful, device activated. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7527] device (eth0): state change: ip-check -> secondaries (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7529] device (eth0): state change: secondaries -> activated (reason 'none', managed-type: 'full') Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7532] manager: NetworkManager state is now CONNECTED_SITE Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7534] device (eth0): Activation: successful, device activated. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7539] manager: NetworkManager state is now CONNECTED_GLOBAL Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com NetworkManager[711]: [1737217425.7541] manager: startup complete Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished NetworkManager-wait-online.service - Network Manager Wait Online. ░░ Subject: A start job for unit NetworkManager-wait-online.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-wait-online.service has finished successfully. ░░ ░░ The job identifier is 217. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting cloud-init.service - Initial cloud-init job (metadata service crawler)... ░░ Subject: A start job for unit cloud-init.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.service has begun execution. ░░ ░░ The job identifier is 272. Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.11.160.238 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.18.100.10 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.2.32.37 Jan 18 11:23:45 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Added source 10.2.32.38 Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Cloud-init v. 24.1.4-21.el10 running 'init' at Sat, 18 Jan 2025 16:23:46 +0000. Up 19.25 seconds. 
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: ++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++++++++++
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | Device | Up | Address | Mask | Scope | Hw-Address |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | eth0 | True | 10.31.12.161 | 255.255.252.0 | global | 0a:ff:d9:eb:f1:55 |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | eth0 | True | fe80::8ff:d9ff:feeb:f155/64 | . | link | 0a:ff:d9:eb:f1:55 |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | lo | True | 127.0.0.1 | 255.0.0.0 | host | . |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | lo | True | ::1/128 | . | host | . |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: ++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+------------+---------------+-----------+-------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | Route | Destination | Gateway | Genmask | Interface | Flags |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+------------+---------------+-----------+-------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 0 | 0.0.0.0 | 10.31.12.1 | 0.0.0.0 | eth0 | UG |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 1 | 10.31.12.0 | 0.0.0.0 | 255.255.252.0 | eth0 | U |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+------------+---------------+-----------+-------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +++++++++++++++++++Route IPv6 info+++++++++++++++++++
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+---------+-----------+-------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | Route | Destination | Gateway | Interface | Flags |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+---------+-----------+-------+
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 0 | fe80::/64 | :: | eth0 | U |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: | 2 | multicast | :: | eth0 | U |
Jan 18 11:23:46 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: ci-info: +-------+-------------+---------+-----------+-------+
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Generating public/private rsa key pair.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your identification has been saved in /etc/ssh/ssh_host_rsa_key
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your public key has been saved in /etc/ssh/ssh_host_rsa_key.pub
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key fingerprint is:
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: SHA256:MSShpDEbP9ogRFzRCb42XAP8rHSpHahFRsKUYAV1Obg root@ip-10-31-12-161.us-east-1.aws.redhat.com
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key's randomart image is:
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +---[RSA 3072]----+
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |BB%O*.=.. |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |o+o&o* o |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |. *.Ooo o |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ..EoB. o |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ==* . S |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ..o.. |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +----[SHA256]-----+
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Generating public/private ecdsa key pair.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your identification has been saved in /etc/ssh/ssh_host_ecdsa_key
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your public key has been saved in /etc/ssh/ssh_host_ecdsa_key.pub
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key fingerprint is:
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: SHA256:GCuXMVhO0FORMh2uLos29Hp7hzwKfYDM2sDdOFQyBFA root@ip-10-31-12-161.us-east-1.aws.redhat.com
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key's randomart image is:
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +---[ECDSA 256]---+
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |ooE+.+oo++ |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | +==.o |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | .. =+. |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |.oo.o O |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |..++o.* S |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | +...= |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: |..o.o.o. |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | ooo+= . |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | .o+++ o |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +----[SHA256]-----+
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Generating public/private ed25519 key pair.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your identification has been saved in /etc/ssh/ssh_host_ed25519_key
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: Your public key has been saved in /etc/ssh/ssh_host_ed25519_key.pub
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key fingerprint is:
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: SHA256:BQ2/bBZTO1s7EUC1xyuYfLltQADkjE3Pm1eZdeVIMeM root@ip-10-31-12-161.us-east-1.aws.redhat.com
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: The key's randomart image is:
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +--[ED25519 256]--+
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | +*.o+oB.=|
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | *o+..+ O=|
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | . +=oo.E++|
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | o.+*+o+.|
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | S =*.*o. |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | o o =. |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | . o |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | . |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: | |
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[798]: +----[SHA256]-----+
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init.service - Initial cloud-init job (metadata service crawler). ░░ Subject: A start job for unit cloud-init.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.service has finished successfully. ░░ ░░ The job identifier is 272.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-config.target - Cloud-config availability. ░░ Subject: A start job for unit cloud-config.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.target has finished successfully. ░░ ░░ The job identifier is 270.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target network-online.target - Network is Online. ░░ Subject: A start job for unit network-online.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network-online.target has finished successfully. ░░ ░░ The job identifier is 216.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting cloud-config.service - Apply the settings specified in cloud-config... ░░ Subject: A start job for unit cloud-config.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.service has begun execution. ░░ ░░ The job identifier is 269.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting kdump.service - Crash recovery kernel arming... ░░ Subject: A start job for unit kdump.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit kdump.service has begun execution. ░░ ░░ The job identifier is 274.
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting restraintd.service - The restraint harness.... ░░ Subject: A start job for unit restraintd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit restraintd.service has begun execution. ░░ ░░ The job identifier is 229. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting rpc-statd-notify.service - Notify NFS peers of a restart... ░░ Subject: A start job for unit rpc-statd-notify.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-statd-notify.service has begun execution. ░░ ░░ The job identifier is 245. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting rsyslog.service - System Logging Service... ░░ Subject: A start job for unit rsyslog.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rsyslog.service has begun execution. ░░ ░░ The job identifier is 253. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon... ░░ Subject: A start job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 260. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sm-notify[881]: Version 2.8.2 starting Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started rpc-statd-notify.service - Notify NFS peers of a restart. ░░ Subject: A start job for unit rpc-statd-notify.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-statd-notify.service has finished successfully. ░░ ░░ The job identifier is 245. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com (sshd)[883]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[883]: Server listening on 0.0.0.0 port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[883]: Server listening on :: port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon. ░░ Subject: A start job for unit sshd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has finished successfully. ░░ ░░ The job identifier is 260. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started restraintd.service - The restraint harness.. ░░ Subject: A start job for unit restraintd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit restraintd.service has finished successfully. ░░ ░░ The job identifier is 229. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com rsyslogd[882]: [origin software="rsyslogd" swVersion="8.2412.0-1.el10" x-pid="882" x-info="https://www.rsyslog.com"] start Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started rsyslog.service - System Logging Service. 
░░ Subject: A start job for unit rsyslog.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rsyslog.service has finished successfully. ░░ ░░ The job identifier is 253. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com rsyslogd[882]: imjournal: journal files changed, reloading... [v8.2412.0-1.el10 try https://www.rsyslog.com/e/0 ] Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[929]: Cloud-init v. 24.1.4-21.el10 running 'modules:config' at Sat, 18 Jan 2025 16:23:47 +0000. Up 20.91 seconds. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopping sshd.service - OpenSSH server daemon... ░░ Subject: A stop job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 499. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[883]: Received signal 15; terminating. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit sshd.service has successfully entered the 'dead' state. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopped sshd.service - OpenSSH server daemon. ░░ Subject: A stop job for unit sshd.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd.service has finished. ░░ ░░ The job identifier is 499 and the job result is done. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopped target sshd-keygen.target. ░░ Subject: A stop job for unit sshd-keygen.target has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd-keygen.target has finished. ░░ ░░ The job identifier is 583 and the job result is done. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Stopping sshd-keygen.target... ░░ Subject: A stop job for unit sshd-keygen.target has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd-keygen.target has begun execution. ░░ ░░ The job identifier is 583. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: ssh-host-keys-migration.service - Update OpenSSH host key permissions was skipped because of an unmet condition check (ConditionPathExists=!/var/lib/.ssh-host-keys-migration). ░░ Subject: A start job for unit ssh-host-keys-migration.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit ssh-host-keys-migration.service has finished successfully. ░░ ░░ The job identifier is 582. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ecdsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ecdsa.service has finished successfully. ░░ ░░ The job identifier is 580. 
Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ed25519.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ed25519.service has finished successfully. ░░ ░░ The job identifier is 581. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@rsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@rsa.service has finished successfully. ░░ ░░ The job identifier is 578. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target sshd-keygen.target. ░░ Subject: A start job for unit sshd-keygen.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen.target has finished successfully. ░░ ░░ The job identifier is 583. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon... ░░ Subject: A start job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 499. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com (sshd)[933]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[933]: Server listening on 0.0.0.0 port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com sshd[933]: Server listening on :: port 22. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon. ░░ Subject: A start job for unit sshd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has finished successfully. ░░ ░░ The job identifier is 499. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-config.service - Apply the settings specified in cloud-config. ░░ Subject: A start job for unit cloud-config.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.service has finished successfully. ░░ ░░ The job identifier is 269. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting cloud-final.service - Execute cloud user/final scripts... ░░ Subject: A start job for unit cloud-final.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-final.service has begun execution. ░░ ░░ The job identifier is 268. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting systemd-user-sessions.service - Permit User Sessions... 
░░ Subject: A start job for unit systemd-user-sessions.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-user-sessions.service has begun execution. ░░ ░░ The job identifier is 255. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished systemd-user-sessions.service - Permit User Sessions. ░░ Subject: A start job for unit systemd-user-sessions.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-user-sessions.service has finished successfully. ░░ ░░ The job identifier is 255. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started crond.service - Command Scheduler. ░░ Subject: A start job for unit crond.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit crond.service has finished successfully. ░░ ░░ The job identifier is 278. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started getty@tty1.service - Getty on tty1. ░░ Subject: A start job for unit getty@tty1.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit getty@tty1.service has finished successfully. ░░ ░░ The job identifier is 241. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started serial-getty@ttyS0.service - Serial Getty on ttyS0. ░░ Subject: A start job for unit serial-getty@ttyS0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit serial-getty@ttyS0.service has finished successfully. ░░ ░░ The job identifier is 236. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target getty.target - Login Prompts. ░░ Subject: A start job for unit getty.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit getty.target has finished successfully. ░░ ░░ The job identifier is 235. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target multi-user.target - Multi-User System. ░░ Subject: A start job for unit multi-user.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit multi-user.target has finished successfully. ░░ ░░ The job identifier is 121. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP... ░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp-runlevel.service has begun execution. ░░ ░░ The job identifier is 257. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-update-utmp-runlevel.service has successfully entered the 'dead' state. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP. 
░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp-runlevel.service has finished successfully. ░░ ░░ The job identifier is 257. Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) STARTUP (1.7.0) Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) INFO (Syslog will be used instead of sendmail.) Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 5% if used.) Jan 18 11:23:47 ip-10-31-12-161.us-east-1.aws.redhat.com crond[938]: (CRON) INFO (running with inotify support) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com restraintd[891]: Listening on http://localhost:8081 Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[966]: Cloud-init v. 24.1.4-21.el10 running 'modules:final' at Sat, 18 Jan 2025 16:23:48 +0000. Up 21.37 seconds. Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1001]: ############################################################# Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1002]: -----BEGIN SSH HOST KEY FINGERPRINTS----- Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1004]: 256 SHA256:GCuXMVhO0FORMh2uLos29Hp7hzwKfYDM2sDdOFQyBFA root@ip-10-31-12-161.us-east-1.aws.redhat.com (ECDSA) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1006]: 256 SHA256:BQ2/bBZTO1s7EUC1xyuYfLltQADkjE3Pm1eZdeVIMeM root@ip-10-31-12-161.us-east-1.aws.redhat.com (ED25519) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1010]: 3072 SHA256:MSShpDEbP9ogRFzRCb42XAP8rHSpHahFRsKUYAV1Obg root@ip-10-31-12-161.us-east-1.aws.redhat.com (RSA) Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1012]: -----END SSH HOST KEY FINGERPRINTS----- Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[1015]: ############################################################# Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: Detected change(s) in the following file(s): /etc/fstab Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com cloud-init[966]: Cloud-init v. 24.1.4-21.el10 finished at Sat, 18 Jan 2025 16:23:48 +0000. Datasource DataSourceEc2Local. Up 21.51 seconds Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished cloud-final.service - Execute cloud user/final scripts. ░░ Subject: A start job for unit cloud-final.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-final.service has finished successfully. ░░ ░░ The job identifier is 268. Jan 18 11:23:48 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-init.target - Cloud-init target. ░░ Subject: A start job for unit cloud-init.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.target has finished successfully. ░░ ░░ The job identifier is 267. 
Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 0 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 0 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 48 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 48 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 49 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 49 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 50 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 50 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 51 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 51 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 52 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 52 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 53 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 53 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 54 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 54 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 55 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 55 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 56 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 56 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 57 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 57 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 58 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 58 affinity is now unmanaged Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: Cannot change IRQ 59 affinity: Permission denied Jan 18 11:23:49 ip-10-31-12-161.us-east-1.aws.redhat.com irqbalance[659]: IRQ 59 affinity is now unmanaged Jan 18 11:23:51 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Selected source 10.2.32.38 Jan 18 11:23:52 ip-10-31-12-161.us-east-1.aws.redhat.com kernel: block xvda: the capability attribute has been deprecated. 
Jan 18 11:23:52 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: Rebuilding /boot/initramfs-6.12.0-38.el10.x86_64kdump.img Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1398]: dracut-103-1.el10 Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1401]: Executing: /usr/bin/dracut --list-modules Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1473]: dracut-103-1.el10 Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Executing: /usr/bin/dracut --add kdumpbase --quiet --hostonly --hostonly-cmdline --hostonly-i18n --hostonly-mode strict --hostonly-nics --aggressive-strip --omit "rdma plymouth resume ifcfg earlykdump" --mount "/dev/disk/by-uuid/5421f911-fafd-4f0d-bf2e-2916252992eb /sysroot xfs rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota" --add squash-squashfs --squash-compressor zstd --no-hostonly-default-device -f /boot/initramfs-6.12.0-38.el10.x86_64kdump.img 6.12.0-38.el10.x86_64 Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd-wait-online' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found! Jan 18 11:23:53 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'busybox' will not be installed, because command 'busybox' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmanctl' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found! 
Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'ifcfg' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'plymouth' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'multipath' will not be installed, because command 'multipath' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'crypt-gpg' will not be installed, because command 'gpg' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'pcsc' will not be installed, because command 'pcscd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'nvmf' will not be installed, because command 'nvme' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'resume' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'biosdevname' will not be installed, because command 'biosdevname' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'earlykdump' will not be installed, because it's in the list to be omitted! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found! 
Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'busybox' will not be installed, because command 'busybox' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmanctl' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'multipath' will not be installed, because command 'multipath' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'crypt-gpg' will not be installed, because command 'gpg' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'pcsc' will not be installed, because command 'pcscd' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'nvmf' will not be installed, because command 'nvme' could not be found! 
Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found! Jan 18 11:23:54 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fips *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fips-crypto-policies *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-ask-password *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-initrd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-journald *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-modules-load *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-sysctl *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-sysusers *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-tmpfiles *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: systemd-udevd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: rngd *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: i18n *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: drm *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: prefixdevname *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: kernel-modules *** Jan 18 11:23:55 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. 
Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: kernel-modules-extra *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: configuration source "/run/depmod.d" does not exist Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: configuration source "/lib/depmod.d" does not exist Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: parsing configuration file "/etc/depmod.d/dist.conf" Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: kernel-modules-extra: /etc/depmod.d/dist.conf: added "updates extra built-in weak-updates" to the list of search directories Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: pcmcia *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Skipping udev rule: 60-pcmcia.rules Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fstab-sys *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: hwdb *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: rootfs-block *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: squash-squashfs *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: terminfo *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: udev-rules *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: dracut-systemd *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: usrmount *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: base *** Jan 18 11:23:56 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: fs-lib *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: kdumpbase *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: memstrack *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: microcode_ctl-fw_dir_override *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl module: mangling fw_dir Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: reset fw_dir to "/lib/firmware/updates /lib/firmware" Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel"... Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: intel: caveats check for kernel version "6.12.0-38.el10.x86_64" passed, adding "/usr/share/microcode_ctl/ucode_with_caveats/intel" to fw_dir variable Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel-06-4f-01"... 
Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: configuration "intel-06-4f-01" is ignored Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: microcode_ctl: final fw_dir: "/usr/share/microcode_ctl/ucode_with_caveats/intel /lib/firmware/updates /lib/firmware" Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: shutdown *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including module: squash-lib *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Including modules done *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Installing kernel module dependencies *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Installing kernel module dependencies done *** Jan 18 11:23:57 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Resolving executable dependencies *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Resolving executable dependencies done *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Hardlinking files *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Mode: real Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Method: sha256 Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Files: 548 Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Linked: 25 files Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Compared: 0 xattrs Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Compared: 53 files Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Saved: 13.6 MiB Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Duration: 0.174529 seconds Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Hardlinking files done *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Generating early-microcode cpio image *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Constructing GenuineIntel.bin *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Constructing GenuineIntel.bin *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Store current command line parameters *** Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: Stored kernel commandline: Jan 18 11:23:58 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: No dracut internal kernel commandline stored in the initramfs Jan 18 11:23:59 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Squashing the files inside the initramfs *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Squashing the files inside the initramfs done *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Creating image file '/boot/initramfs-6.12.0-38.el10.x86_64kdump.img' *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com dracut[1476]: *** Creating initramfs image file '/boot/initramfs-6.12.0-38.el10.x86_64kdump.img' done *** Jan 18 11:24:07 ip-10-31-12-161.us-east-1.aws.redhat.com kernel: PKCS7: Message signed outside of X.509 validity window Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: kexec: loaded kdump kernel Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: 
Starting kdump: [OK] Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com kdumpctl[887]: kdump: Notice: No vmcore creation test performed! Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished kdump.service - Crash recovery kernel arming. ░░ Subject: A start job for unit kdump.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit kdump.service has finished successfully. ░░ ░░ The job identifier is 274. Jan 18 11:24:08 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Startup finished in 999ms (kernel) + 4.893s (initrd) + 35.521s (userspace) = 41.414s. ░░ Subject: System start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ All system services necessary queued for starting at boot have been ░░ started. Note that this does not mean that the machine is now idle as services ░░ might still be busy with completing start-up. ░░ ░░ Kernel start-up required 999279 microseconds. ░░ ░░ Initrd start-up required 4893825 microseconds. ░░ ░░ Userspace start-up required 35521236 microseconds. Jan 18 11:24:15 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: systemd-hostnamed.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-hostnamed.service has successfully entered the 'dead' state. Jan 18 11:24:57 ip-10-31-12-161.us-east-1.aws.redhat.com chronyd[673]: Selected source 24.187.197.51 (2.centos.pool.ntp.org) Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4336]: Accepted publickey for root from 10.30.32.164 port 60662 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Created slice user-0.slice - User Slice of UID 0. ░░ Subject: A start job for unit user-0.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-0.slice has finished successfully. ░░ ░░ The job identifier is 662. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting user-runtime-dir@0.service - User Runtime Directory /run/user/0... ░░ Subject: A start job for unit user-runtime-dir@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@0.service has begun execution. ░░ ░░ The job identifier is 584. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 1 of user root. ░░ Subject: A new session 1 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 1 has been created for the user root. ░░ ░░ The leading process of the session is 4336. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Finished user-runtime-dir@0.service - User Runtime Directory /run/user/0. ░░ Subject: A start job for unit user-runtime-dir@0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@0.service has finished successfully. ░░ ░░ The job identifier is 584. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting user@0.service - User Manager for UID 0... 
░░ Subject: A start job for unit user@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@0.service has begun execution. ░░ ░░ The job identifier is 664. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 2 of user root. ░░ Subject: A new session 2 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 2 has been created for the user root. ░░ ░░ The leading process of the session is 4341. Jan 18 11:26:37 ip-10-31-12-161.us-east-1.aws.redhat.com (systemd)[4341]: pam_unix(systemd-user:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Queued start job for default target default.target. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Created slice app.slice - User Application Slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 9. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: grub-boot-success.timer - Mark boot as successful after the user session has run 2 minutes was skipped because of an unmet condition check (ConditionUser=!@system). ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 6. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Started systemd-tmpfiles-clean.timer - Daily Cleanup of User's Temporary Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 4. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target paths.target - Paths. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 12. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target timers.target - Timers. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 3. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Starting dbus.socket - D-Bus User Message Bus Socket... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 8. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Starting systemd-tmpfiles-setup.service - Create User Files and Directories... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 11. 
Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Finished systemd-tmpfiles-setup.service - Create User Files and Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 11. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Listening on dbus.socket - D-Bus User Message Bus Socket. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 8. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target sockets.target - Sockets. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 7. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target basic.target - Basic System. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 2. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Reached target default.target - Main User Target. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 1. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[4341]: Startup finished in 116ms. ░░ Subject: User manager start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The user manager instance for user 0 has been started. All services queued ░░ for starting have been started. Note that other services might still be starting ░░ up or be started at any later time. ░░ ░░ Startup of the manager took 116963 microseconds. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started user@0.service - User Manager for UID 0. ░░ Subject: A start job for unit user@0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@0.service has finished successfully. ░░ ░░ The job identifier is 664. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started session-1.scope - Session 1 of User root. ░░ Subject: A start job for unit session-1.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-1.scope has finished successfully. ░░ ░░ The job identifier is 745. 
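The two "Startup finished" summaries above can be checked against the microsecond figures in their catalog text; systemd truncates rather than rounds (4893825 us is shown as 4.893s, and the user manager's 116963 us as 116ms). A small sanity check:

    # Verify the boot-time summary: kernel + initrd + userspace, in microseconds.
    us = {"kernel": 999_279, "initrd": 4_893_825, "userspace": 35_521_236}
    total = sum(us.values())
    assert total == 41_414_340             # printed by systemd as 41.414s
    print(f"{total // 1000 / 1000:.3f}s")  # truncate to ms, then show as seconds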
Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4336]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4352]: Received disconnect from 10.30.32.164 port 60662:11: disconnected by user Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4352]: Disconnected from user root 10.30.32.164 port 60662 Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4336]: pam_unix(sshd:session): session closed for user root Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Session 1 logged out. Waiting for processes to exit. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: session-1.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit session-1.scope has successfully entered the 'dead' state. Jan 18 11:26:38 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Removed session 1. ░░ Subject: Session 1 has been terminated ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A session with the ID 1 has been terminated. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4388]: Accepted publickey for root from 10.31.11.42 port 54246 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4389]: Accepted publickey for root from 10.31.11.42 port 54248 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 3 of user root. ░░ Subject: A new session 3 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 3 has been created for the user root. ░░ ░░ The leading process of the session is 4388. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started session-3.scope - Session 3 of User root. ░░ Subject: A start job for unit session-3.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-3.scope has finished successfully. ░░ ░░ The job identifier is 827. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: New session 4 of user root. ░░ Subject: A new session 4 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 4 has been created for the user root. ░░ ░░ The leading process of the session is 4389. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started session-4.scope - Session 4 of User root. ░░ Subject: A start job for unit session-4.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-4.scope has finished successfully. ░░ ░░ The job identifier is 909. 
Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4388]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4389]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4395]: Received disconnect from 10.31.11.42 port 54248:11: disconnected by user Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4395]: Disconnected from user root 10.31.11.42 port 54248 Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com sshd-session[4389]: pam_unix(sshd:session): session closed for user root Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: session-4.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit session-4.scope has successfully entered the 'dead' state. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Session 4 logged out. Waiting for processes to exit. Jan 18 11:26:41 ip-10-31-12-161.us-east-1.aws.redhat.com systemd-logind[661]: Removed session 4. ░░ Subject: Session 4 has been terminated ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A session with the ID 4 has been terminated. Jan 18 11:27:05 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Starting systemd-hostnamed.service - Hostname Service... ░░ Subject: A start job for unit systemd-hostnamed.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has begun execution. ░░ ░░ The job identifier is 991. Jan 18 11:27:05 ip-10-31-12-161.us-east-1.aws.redhat.com systemd[1]: Started systemd-hostnamed.service - Hostname Service. ░░ Subject: A start job for unit systemd-hostnamed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has finished successfully. ░░ ░░ The job identifier is 991. Jan 18 11:27:05 managed-node1 systemd-hostnamed[5843]: Hostname set to <managed-node1> (static) Jan 18 11:27:05 managed-node1 NetworkManager[711]: <info>  [1737217625.1812] hostname: static hostname changed from "ip-10-31-12-161.us-east-1.aws.redhat.com" to "managed-node1" Jan 18 11:27:05 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1069. Jan 18 11:27:05 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1069. Jan 18 11:27:15 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.
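The hostname flip above (from the EC2 name to managed-node1) goes through systemd-hostnamed, which is why that service is bus-activated just before the change and NetworkManager immediately picks up the new static hostname. A minimal sketch of the equivalent manual step (hostnamectl talks to systemd-hostnamed over D-Bus):

    # Set the static hostname, as recorded by systemd-hostnamed above.
    import subprocess
    subprocess.run(["hostnamectl", "set-hostname", "managed-node1"], check=True)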
Jan 18 11:27:33 managed-node1 sshd-session[6513]: Accepted publickey for root from 10.31.43.51 port 59536 ssh2: RSA SHA256:9j1blwt3wcrRiGYZQ7ZGu9axm3cDklH6/z4c+Ee8CzE Jan 18 11:27:33 managed-node1 systemd-logind[661]: New session 5 of user root. ░░ Subject: A new session 5 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 5 has been created for the user root. ░░ ░░ The leading process of the session is 6513. Jan 18 11:27:33 managed-node1 systemd[1]: Started session-5.scope - Session 5 of User root. ░░ Subject: A start job for unit session-5.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-5.scope has finished successfully. ░░ ░░ The job identifier is 1148. Jan 18 11:27:33 managed-node1 sshd-session[6513]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:27:35 managed-node1 systemd[1]: systemd-hostnamed.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-hostnamed.service has successfully entered the 'dead' state. Jan 18 11:27:35 managed-node1 python3.12[6665]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Jan 18 11:27:36 managed-node1 python3.12[6819]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:27:37 managed-node1 python3.12[6944]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:27:38 managed-node1 sudo[7194]: root : TTY=pts/0 ; PWD=/root ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ecggmjqitbnmvffvmmvciguphmjxzmlw ; /usr/bin/python3.12 /root/.ansible/tmp/ansible-tmp-1737217658.4395213-6967-33253008089585/AnsiballZ_dnf.py' Jan 18 11:27:38 managed-node1 sudo[7194]: pam_unix(sudo:session): session opened for user root(uid=0) by root(uid=0) Jan 18 11:27:38 managed-node1 python3.12[7197]: ansible-ansible.legacy.dnf Invoked with name=['iptables-nft', 'podman', 'shadow-utils-subid'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:27:48 managed-node1 kernel: SELinux: Converting 381 SID table entries... 
Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability open_perms=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability always_check_network=0 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Jan 18 11:27:48 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Jan 18 11:27:50 managed-node1 kernel: SELinux: Converting 382 SID table entries... Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability open_perms=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability always_check_network=0 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Jan 18 11:27:50 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Jan 18 11:27:51 managed-node1 setsebool[7282]: The virt_use_nfs policy boolean was changed to 1 by root Jan 18 11:27:51 managed-node1 setsebool[7282]: The virt_sandbox_use_all_caps policy boolean was changed to 1 by root Jan 18 11:27:54 managed-node1 kernel: SELinux: Converting 389 SID table entries... Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability open_perms=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability always_check_network=0 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Jan 18 11:27:54 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Jan 18 11:27:54 managed-node1 groupadd[7304]: group added to /etc/group: name=polkitd, GID=114 Jan 18 11:27:54 managed-node1 groupadd[7304]: group added to /etc/gshadow: name=polkitd Jan 18 11:27:54 managed-node1 groupadd[7304]: new group: name=polkitd, GID=114 Jan 18 11:27:54 managed-node1 useradd[7307]: new user: name=polkitd, UID=114, GID=114, home=/, shell=/sbin/nologin, from=none Jan 18 11:27:54 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. 
░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically when ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Jan 18 11:27:55 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. Jan 18 11:27:55 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. Jan 18 11:27:55 managed-node1 systemd[1]: Listening on pcscd.socket - PC/SC Smart Card Daemon Activation Socket. ░░ Subject: A start job for unit pcscd.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit pcscd.socket has finished successfully. ░░ ░░ The job identifier is 1233. Jan 18 11:27:56 managed-node1 systemd[1]: Started run-p7706-i8006.service - [systemd-run] /usr/bin/systemctl start man-db-cache-update. ░░ Subject: A start job for unit run-p7706-i8006.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-p7706-i8006.service has finished successfully. ░░ ░░ The job identifier is 1311. Jan 18 11:27:56 managed-node1 systemctl[7707]: Warning: The unit file, source configuration file or drop-ins of man-db-cache-update.service changed on disk. Run 'systemctl daemon-reload' to reload units. Jan 18 11:27:56 managed-node1 systemd[1]: Starting man-db-cache-update.service...
░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1389. Jan 18 11:27:56 managed-node1 systemd[1]: Reload requested from client PID 7710 ('systemctl') (unit session-5.scope)... Jan 18 11:27:56 managed-node1 systemd[1]: Reloading... Jan 18 11:27:56 managed-node1 systemd-ssh-generator[7749]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:27:56 managed-node1 systemd-rc-local-generator[7746]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:27:56 managed-node1 (sd-exec-[7731]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:27:56 managed-node1 systemd[1]: Reloading finished in 266 ms. Jan 18 11:27:56 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units… Jan 18 11:27:56 managed-node1 systemd[1]: Reloading user@0.service - User Manager for UID 0... ░░ Subject: A reload job for unit user@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A reload job for unit user@0.service has begun execution. ░░ ░░ The job identifier is 1467. Jan 18 11:27:56 managed-node1 systemd[4341]: Received SIGRTMIN+25 from PID 1 (systemd). Jan 18 11:27:56 managed-node1 systemd[4341]: Reexecuting. Jan 18 11:27:56 managed-node1 systemd[1]: Reloaded user@0.service - User Manager for UID 0. ░░ Subject: A reload job for unit user@0.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A reload job for unit user@0.service has finished. ░░ ░░ The job identifier is 1467 and the job result is done. Jan 18 11:27:57 managed-node1 sudo[7194]: pam_unix(sudo:session): session closed for user root Jan 18 11:27:58 managed-node1 python3.12[8436]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:27:58 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Jan 18 11:27:58 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1389. Jan 18 11:27:58 managed-node1 systemd[1]: run-p7706-i8006.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-p7706-i8006.service has successfully entered the 'dead' state. 
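The role's package step above boils down to a dnf transaction (iptables-nft, podman, shadow-utils-subid), after which it probes the installed podman version with ansible.legacy.command; the SELinux policy reloads and boolean changes in between are side effects of the package scriptlets (container-selinux and related policy packages), not something the role does directly. A rough CLI equivalent of the two logged module calls, as a hedged sketch:

    # Install the role's packages, then capture the podman version string.
    import subprocess

    subprocess.run(
        ["dnf", "-y", "install", "iptables-nft", "podman", "shadow-utils-subid"],
        check=True,
    )
    out = subprocess.run(
        ["podman", "--version"], check=True, capture_output=True, text=True
    ).stdout
    print(out.strip().split()[-1])  # "podman version X.Y.Z" -> "X.Y.Z"

The reported version matters because the role uses it to gate version-dependent behavior such as quadlet support.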
Jan 18 11:27:59 managed-node1 python3.12[8577]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jan 18 11:27:59 managed-node1 python3.12[8709]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:28:01 managed-node1 python3.12[8842]: ansible-tempfile Invoked with prefix=lsr_ suffix=_podman state=directory path=None Jan 18 11:28:01 managed-node1 python3.12[8973]: ansible-file Invoked with path=/tmp/lsr_pjrhafco_podman/auth state=directory mode=0700 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:02 managed-node1 python3.12[9104]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:28:03 managed-node1 python3.12[9235]: ansible-ansible.legacy.dnf Invoked with name=['python3-pyasn1', 'python3-cryptography', 'python3-dbus'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:28:05 managed-node1 python3.12[9378]: ansible-ansible.legacy.dnf Invoked with name=['certmonger', 'python3-packaging'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:28:06 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. Jan 18 11:28:06 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload.
Jan 18 11:28:06 managed-node1 dbus-broker-launch[654]: Noticed file-system modification, trigger reload. Jan 18 11:28:06 managed-node1 systemd[1]: Reload requested from client PID 9397 ('systemctl') (unit session-5.scope)... Jan 18 11:28:06 managed-node1 systemd[1]: Reloading... Jan 18 11:28:06 managed-node1 systemd-rc-local-generator[9443]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:28:06 managed-node1 systemd-ssh-generator[9445]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:28:06 managed-node1 (sd-exec-[9415]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:28:06 managed-node1 systemd[1]: Reloading finished in 192 ms. Jan 18 11:28:06 managed-node1 systemd[1]: Started run-p9454-i9754.service - [systemd-run] /usr/bin/systemctl start man-db-cache-update. ░░ Subject: A start job for unit run-p9454-i9754.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-p9454-i9754.service has finished successfully. ░░ ░░ The job identifier is 1472. Jan 18 11:28:06 managed-node1 systemd[1]: Starting man-db-cache-update.service... ░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1550. Jan 18 11:28:06 managed-node1 systemd[1]: Reload requested from client PID 9458 ('systemctl') (unit session-5.scope)... Jan 18 11:28:06 managed-node1 systemd[1]: Reloading... Jan 18 11:28:06 managed-node1 systemd-rc-local-generator[9505]: /etc/rc.d/rc.local is not marked executable, skipping.
Jan 18 11:28:06 managed-node1 systemd-ssh-generator[9507]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:28:06 managed-node1 (sd-exec-[9480]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:28:07 managed-node1 systemd[1]: Reloading finished in 336 ms. Jan 18 11:28:07 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units… Jan 18 11:28:07 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Jan 18 11:28:07 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1550. Jan 18 11:28:07 managed-node1 systemd[1]: run-p9454-i9754.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-p9454-i9754.service has successfully entered the 'dead' state. Jan 18 11:28:07 managed-node1 python3.12[9651]: ansible-file Invoked with name=/etc/certmonger//pre-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//pre-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:08 managed-node1 python3.12[9782]: ansible-file Invoked with name=/etc/certmonger//post-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//post-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:09 managed-node1 python3.12[9913]: ansible-ansible.legacy.systemd Invoked with name=certmonger state=started enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None Jan 18 11:28:09 managed-node1 systemd[1]: Reload requested from client PID 9916 ('systemctl') (unit session-5.scope)... Jan 18 11:28:09 managed-node1 systemd[1]: Reloading... Jan 18 11:28:09 managed-node1 systemd-rc-local-generator[9960]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:28:09 managed-node1 systemd-ssh-generator[9963]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:28:09 managed-node1 (sd-exec-[9934]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:28:09 managed-node1 systemd[1]: Reloading finished in 197 ms. Jan 18 11:28:09 managed-node1 systemd[1]: Starting certmonger.service - Certificate monitoring and PKI enrollment... ░░ Subject: A start job for unit certmonger.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit certmonger.service has begun execution. ░░ ░░ The job identifier is 1628. 
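Before requesting any certificate, the certificate role makes sure certmonger is enabled and running (the ansible.legacy.systemd call above with state=started and enabled=True). A minimal manual equivalent, as a hedged sketch:

    # Enable certmonger at boot and start it immediately.
    import subprocess
    subprocess.run(["systemctl", "enable", "--now", "certmonger.service"], check=True)

Once the daemon is up, the certificate_request seen next asks it to issue and track a self-signed certificate; the tracking state lands under /var/lib/certmonger/requests, which is the file certmonger keeps rewriting below.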
Jan 18 11:28:09 managed-node1 (rtmonger)[9971]: certmonger.service: Referenced but unset environment variable evaluates to an empty string: OPTS Jan 18 11:28:09 managed-node1 systemd[1]: Started certmonger.service - Certificate monitoring and PKI enrollment. ░░ Subject: A start job for unit certmonger.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit certmonger.service has finished successfully. ░░ ░░ The job identifier is 1628. Jan 18 11:28:10 managed-node1 python3.12[10129]: ansible-fedora.linux_system_roles.certificate_request Invoked with name=podman_registry dns=['localhost', '127.0.0.1'] directory=/etc/pki/tls wait=True ca=self-sign __header=# # Ansible managed # # system_role:certificate provider_config_directory=/etc/certmonger provider=certmonger key_usage=['digitalSignature', 'keyEncipherment'] extended_key_usage=['id-kp-serverAuth', 'id-kp-clientAuth'] auto_renew=True ip=None email=None common_name=None country=None state=None locality=None organization=None organizational_unit=None contact_email=None key_size=None owner=None group=None mode=None principal=None run_before=None run_after=None Jan 18 11:28:10 managed-node1 certmonger[9971]: 2025-01-18 11:28:10 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:10 managed-node1 certmonger[10144]: Certificate in file "/etc/pki/tls/certs/podman_registry.crt" issued by CA and saved. Jan 18 11:28:11 managed-node1 python3.12[10275]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt Jan 18 11:28:11 managed-node1 python3.12[10406]: ansible-slurp Invoked with path=/etc/pki/tls/private/podman_registry.key src=/etc/pki/tls/private/podman_registry.key Jan 18 11:28:11 managed-node1 python3.12[10537]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt Jan 18 11:28:12 managed-node1 python3.12[10668]: ansible-ansible.legacy.command Invoked with _raw_params=getcert stop-tracking -f /etc/pki/tls/certs/podman_registry.crt _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:28:12 managed-node1 certmonger[9971]: 2025-01-18 11:28:12 [9971] Wrote to /var/lib/certmonger/requests/20250118162810 Jan 18 11:28:12 managed-node1 python3.12[10800]: ansible-file Invoked with path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:13 managed-node1 python3.12[10931]: ansible-file Invoked with path=/etc/pki/tls/private/podman_registry.key state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:13 managed-node1 python3.12[11062]: ansible-file Invoked with
path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:14 managed-node1 python3.12[11193]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_pjrhafco_podman/auth/registry_cert.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:28:14 managed-node1 python3.12[11298]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_pjrhafco_podman/auth/registry_cert.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1737217693.814509-7893-50960251601282/.source.crt _original_basename=.sf0hyasi follow=False checksum=e2d7e6ef6749810cf6e2d8aae103158b6bc05f37 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:14 managed-node1 python3.12[11429]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_pjrhafco_podman/auth/registry_key.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:28:15 managed-node1 python3.12[11534]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_pjrhafco_podman/auth/registry_key.pem mode=0600 src=/root/.ansible/tmp/ansible-tmp-1737217694.6593502-7925-207039514606112/.source.pem _original_basename=.x4xwg5es follow=False checksum=69c8e519848469056bae04f7cb3e5bd8d039ff36 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:15 managed-node1 python3.12[11665]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_pjrhafco_podman/auth/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:28:16 managed-node1 python3.12[11770]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_pjrhafco_podman/auth/ca.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1737217695.4154303-7964-28971117670208/.source.crt _original_basename=.c_gb9adz follow=False checksum=e2d7e6ef6749810cf6e2d8aae103158b6bc05f37 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:28:16 managed-node1 python3.12[11901]: ansible-ansible.legacy.dnf Invoked with name=['httpd-tools', 'skopeo'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jan 18 11:28:18 managed-node1 systemd[1]: Started run-p11915-i12215.service - [systemd-run] /usr/bin/systemctl start man-db-cache-update. 
░░ Subject: A start job for unit run-p11915-i12215.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-p11915-i12215.service has finished successfully. ░░ ░░ The job identifier is 1707. Jan 18 11:28:18 managed-node1 systemd[1]: Starting man-db-cache-update.service... ░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1785. Jan 18 11:28:19 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Jan 18 11:28:19 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1785. Jan 18 11:28:19 managed-node1 systemd[1]: run-p11915-i12215.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-p11915-i12215.service has successfully entered the 'dead' state. Jan 18 11:28:20 managed-node1 python3.12[12422]: ansible-ansible.legacy.command Invoked with _raw_params=podman run -d -p 127.0.0.1:5000:5000 --name podman_registry -v /tmp/lsr_pjrhafco_podman/auth:/auth:Z -e REGISTRY_AUTH=htpasswd -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/registry_cert.crt -e REGISTRY_HTTP_TLS_KEY=/auth/registry_key.pem quay.io/libpod/registry:2.8.2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:28:20 managed-node1 systemd[1]: var-lib-containers-storage-overlay-compat2297044060-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-compat2297044060-merged.mount has successfully entered the 'dead' state. Jan 18 11:28:20 managed-node1 kernel: evm: overlay not supported Jan 18 11:28:20 managed-node1 systemd[1]: var-lib-containers-storage-overlay-metacopy\x2dcheck3987118804-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-metacopy\x2dcheck3987118804-merged.mount has successfully entered the 'dead' state. Jan 18 11:28:20 managed-node1 podman[12423]: 2025-01-18 11:28:20.843076291 -0500 EST m=+0.079374761 system refresh Jan 18 11:28:22 managed-node1 systemd[1]: var-lib-containers-storage-overlay-compat2846827946-lower\x2dmapped.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-compat2846827946-lower\x2dmapped.mount has successfully entered the 'dead' state. 
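The registry used by this test is launched with the podman run command recorded above (ansible-ansible.legacy.command, PID 12422). Expressed as a task it would look roughly like the sketch below; every flag is taken verbatim from the logged command line:

  - name: Start a TLS- and htpasswd-protected test registry
    ansible.builtin.command: >-
      podman run -d -p 127.0.0.1:5000:5000 --name podman_registry
      -v /tmp/lsr_pjrhafco_podman/auth:/auth:Z
      -e REGISTRY_AUTH=htpasswd
      -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm"
      -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/registry_cert.crt
      -e REGISTRY_HTTP_TLS_KEY=/auth/registry_key.pem
      quay.io/libpod/registry:2.8.2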
Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.234995452 -0500 EST m=+2.471294120 image pull 0030ba3d620c647159c935ee778991c68ef3e51a274703753b0bc530104ef5e5 quay.io/libpod/registry:2.8.2 Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.255040284 -0500 EST m=+2.491338773 volume create 5b91e845725b9a38c4db654f5a648d0520f98a70531e23c7c4ebc3dd02be25a3 Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.265995799 -0500 EST m=+2.502294290 container create ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 kernel: bridge: filtering via arp/ip/ip6tables is no longer available by default. Update your scripts to load br_netfilter if you need this. Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3121] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/3) Jan 18 11:28:23 managed-node1 (udev-worker)[12509]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:28:23 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:28:23 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3266] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4) Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3298] device (podman0): carrier: link connected Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3299] device (veth0): carrier: link connected Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3324] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3337] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3343] device (podman0): Activation: starting connection 'podman0' (7e61ee2e-4601-477b-80d6-a04deed50d8c) Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3344] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3346] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 (udev-worker)[12430]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3373] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3375] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... 
░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1863. Jan 18 11:28:23 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1863. Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3958] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3961] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.3966] device (podman0): Activation: successful, device activated. Jan 18 11:28:23 managed-node1 systemd[1]: Created slice machine.slice - Slice /machine. ░░ Subject: A start job for unit machine.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine.slice has finished successfully. ░░ ░░ The job identifier is 1943. Jan 18 11:28:23 managed-node1 systemd[1]: Started libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope. ░░ Subject: A start job for unit libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully. ░░ ░░ The job identifier is 1942. Jan 18 11:28:23 managed-node1 systemd[1]: Started libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope - libcrun container. ░░ Subject: A start job for unit libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has finished successfully. ░░ ░░ The job identifier is 1948. Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.608384003 -0500 EST m=+2.844682591 container init ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 podman[12423]: 2025-01-18 11:28:23.611994214 -0500 EST m=+2.848292818 container start ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 systemd[1]: libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has successfully entered the 'dead' state. 
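As the scope and slice entries above show, podman delegates container supervision to transient systemd units: a machine.slice parent cgroup, a libpod-conmon-<id>.scope for the conmon monitor process, and a libpod-<id>.scope for the container payload itself. A diagnostic sketch for inspecting that layout on the host (not part of the test run):

  - name: Inspect podman's transient systemd units (diagnostic sketch)
    ansible.builtin.command: "{{ item }}"
    loop:
      - systemctl status machine.slice --no-pager
      - podman ps --all
    changed_when: false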
Jan 18 11:28:23 managed-node1 podman[12583]: 2025-01-18 11:28:23.659873262 -0500 EST m=+0.032506167 container died ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:28:23 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:28:23 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:28:23 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:28:23 managed-node1 NetworkManager[711]: [1737217703.6969] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:28:23 managed-node1 systemd[1]: run-netns-netns\x2d3e2b2dc4\x2d7b8c\x2d05da\x2d804f\x2dd4a3f0f407c9.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d3e2b2dc4\x2d7b8c\x2d05da\x2d804f\x2dd4a3f0f407c9.mount has successfully entered the 'dead' state. Jan 18 11:28:23 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7-userdata-shm.mount has successfully entered the 'dead' state. Jan 18 11:28:23 managed-node1 systemd[1]: var-lib-containers-storage-overlay-13ef4bc2c39973b84a982a72f8a097e0906cc9de618ea45451c5c2a39e57c85d-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-13ef4bc2c39973b84a982a72f8a097e0906cc9de618ea45451c5c2a39e57c85d-merged.mount has successfully entered the 'dead' state. Jan 18 11:28:23 managed-node1 podman[12583]: 2025-01-18 11:28:23.780630912 -0500 EST m=+0.153263822 container cleanup ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7 (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Jan 18 11:28:23 managed-node1 systemd[1]: libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-ed1781922e1bec043a5e91446e718c6b311b91797a840ed09881ada3ac6379b7.scope has successfully entered the 'dead' state. Jan 18 11:28:24 managed-node1 python3.12[12734]: ansible-wait_for Invoked with port=5000 host=127.0.0.1 timeout=300 connect_timeout=5 delay=0 active_connection_states=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT'] state=started sleep=1 path=None search_regex=None exclude_hosts=None msg=None Jan 18 11:28:24 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:28:33 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jan 18 11:30:20 managed-node1 systemd[1]: Starting logrotate.service - Rotate log files... ░░ Subject: A start job for unit logrotate.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit logrotate.service has begun execution. ░░ ░░ The job identifier is 1954. Jan 18 11:30:20 managed-node1 systemd[1]: logrotate.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit logrotate.service has successfully entered the 'dead' state. Jan 18 11:30:20 managed-node1 systemd[1]: Finished logrotate.service - Rotate log files. ░░ Subject: A start job for unit logrotate.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit logrotate.service has finished successfully. ░░ ░░ The job identifier is 1954. Jan 18 11:31:46 managed-node1 systemd[4341]: Created slice background.slice - User Background Tasks Slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 14. Jan 18 11:31:46 managed-node1 systemd[4341]: Starting systemd-tmpfiles-clean.service - Cleanup of User's Temporary Files and Directories... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 13. Jan 18 11:31:46 managed-node1 systemd[4341]: Finished systemd-tmpfiles-clean.service - Cleanup of User's Temporary Files and Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 13. 
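The ansible-wait_for invocation at 11:28:24 above blocks until the registry answers on its published port before the test proceeds. A minimal equivalent task, with parameters taken from the logged invocation:

  - name: Wait for the registry to listen on 127.0.0.1:5000
    ansible.builtin.wait_for:
      host: 127.0.0.1
      port: 5000
      timeout: 300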
Jan 18 11:33:26 managed-node1 python3.12[12915]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Jan 18 11:33:27 managed-node1 python3.12[13075]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:27 managed-node1 python3.12[13206]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:29 managed-node1 python3.12[13468]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:33:30 managed-node1 python3.12[13605]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jan 18 11:33:31 managed-node1 python3.12[13737]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:34 managed-node1 python3.12[13870]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:36 managed-node1 python3.12[14003]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:33:37 managed-node1 python3.12[14134]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:33:37 managed-node1 python3.12[14239]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1737218016.9384396-19502-222946066402839/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:33:38 managed-node1 python3.12[14370]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jan 18 11:33:38 managed-node1 systemd[1]: Reload requested from client PID 14371 ('systemctl') (unit session-5.scope)... Jan 18 11:33:38 managed-node1 systemd[1]: Reloading... Jan 18 11:33:38 managed-node1 systemd-ssh-generator[14418]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:33:38 managed-node1 systemd-rc-local-generator[14416]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:33:38 managed-node1 (sd-exec-[14390]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:33:39 managed-node1 systemd[1]: Reloading finished in 198 ms. Jan 18 11:33:39 managed-node1 systemd[1]: Starting fstrim.service - Discard unused blocks on filesystems from /etc/fstab... 
░░ Subject: A start job for unit fstrim.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit fstrim.service has begun execution. ░░ ░░ The job identifier is 2032. Jan 18 11:33:39 managed-node1 systemd[1]: fstrim.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit fstrim.service has successfully entered the 'dead' state. Jan 18 11:33:39 managed-node1 systemd[1]: Finished fstrim.service - Discard unused blocks on filesystems from /etc/fstab. ░░ Subject: A start job for unit fstrim.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit fstrim.service has finished successfully. ░░ ░░ The job identifier is 2032. Jan 18 11:33:39 managed-node1 python3.12[14558]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jan 18 11:33:39 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2110. Jan 18 11:33:40 managed-node1 systemd[1]: var-lib-containers-storage-overlay-4e5b681e4812b861c05696cf9eda4e2b798987d072ef1cbc2f4535e4de1a52d2-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-4e5b681e4812b861c05696cf9eda4e2b798987d072ef1cbc2f4535e4de1a52d2-merged.mount has successfully entered the 'dead' state. Jan 18 11:33:40 managed-node1 podman[14562]: 2025-01-18 11:33:40.052580435 -0500 EST m=+0.295779721 image build 19fab7a77b4482d542bc2a8ec7a82b765cf4ab7fe7d20dc420147eb574bb42a8 Jan 18 11:33:40 managed-node1 systemd[1]: Created slice machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice - cgroup machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice. ░░ Subject: A start job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished successfully. ░░ ░░ The job identifier is 2194. Jan 18 11:33:40 managed-node1 podman[14562]: 2025-01-18 11:33:40.101633199 -0500 EST m=+0.344832376 container create 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14562]: 2025-01-18 11:33:40.107607567 -0500 EST m=+0.350806715 pod create 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14562]: 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 Jan 18 11:33:40 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:40 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:33:40 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1665] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/5) Jan 18 11:33:40 managed-node1 (udev-worker)[14624]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:33:40 managed-node1 (udev-worker)[14625]: Network interface NamePolicy= disabled on kernel command line. Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1749] device (veth0): carrier: link connected Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1752] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/6) Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.1755] device (podman0): carrier: link connected Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2057] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2064] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2072] device (podman0): Activation: starting connection 'podman0' (5734d0f6-22f7-4bd2-8489-7ac9c0a2ae63) Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2076] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2078] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2080] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2083] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 2200. Jan 18 11:33:40 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 2200. 
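The quadlet deployment sequence recorded at 11:33:36-11:33:39 (create /etc/containers/systemd, copy quadlet-pod-pod.pod with mode 0644, daemon_reload, then start the generated quadlet-pod-pod-pod.service) reduces to roughly the tasks below. The unit body is an assumption (the log records only the file's checksum), but PodName=quadlet-pod matches the pod name in the podman events, and quadlet derives quadlet-pod-pod-pod.service from a file named quadlet-pod-pod.pod:

  - name: Install the quadlet pod unit
    ansible.builtin.copy:
      dest: /etc/containers/systemd/quadlet-pod-pod.pod
      owner: root
      group: "0"
      mode: "0644"
      content: |
        [Pod]
        PodName=quadlet-pod

  - name: Reload systemd so quadlet regenerates units
    ansible.builtin.systemd:
      daemon_reload: true

  - name: Start the generated pod service
    ansible.builtin.systemd:
      name: quadlet-pod-pod-pod.service
      state: started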
Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2397] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2401] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.2406] device (podman0): Activation: successful, device activated. Jan 18 11:33:40 managed-node1 systemd[1]: Started libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope - libcrun container. ░░ Subject: A start job for unit libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope has finished successfully. ░░ ░░ The job identifier is 2279. Jan 18 11:33:40 managed-node1 podman[14614]: 2025-01-18 11:33:40.264236296 -0500 EST m=+0.133852961 container init 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14614]: 2025-01-18 11:33:40.267313575 -0500 EST m=+0.136930241 container start 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 systemd[1]: libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6.scope has successfully entered the 'dead' state. Jan 18 11:33:40 managed-node1 podman[14614]: 2025-01-18 11:33:40.273314347 -0500 EST m=+0.142930998 pod start 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14614]: quadlet-pod Jan 18 11:33:40 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 2110. 
Jan 18 11:33:40 managed-node1 podman[14670]: 2025-01-18 11:33:40.309469316 -0500 EST m=+0.030173893 container died 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:40 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:33:40 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.3526] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:33:40 managed-node1 podman[14670]: 2025-01-18 11:33:40.407060902 -0500 EST m=+0.127765289 container cleanup 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14670]: 2025-01-18 11:33:40.408230812 -0500 EST m=+0.128935235 pod stop 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 systemd[1]: Removed slice machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice - cgroup machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice. ░░ Subject: A stop job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3.slice has finished. ░░ ░░ The job identifier is 2286 and the job result is done. Jan 18 11:33:40 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 1. Jan 18 11:33:40 managed-node1 podman[14714]: 2025-01-18 11:33:40.492903497 -0500 EST m=+0.049470406 container remove 392362ac44baf9c0901df4ae07776e7099c83a4f2a836a7f761d369f9ea832d6 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:40 managed-node1 podman[14714]: 2025-01-18 11:33:40.503091083 -0500 EST m=+0.059657964 pod remove 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14714]: 673252f46aa3cd0ca7646bf2fd488b1d720447d041c842b1117c4f40a3f4b8c3 Jan 18 11:33:40 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. 
Jan 18 11:33:40 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 1. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Jan 18 11:33:40 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2288. Jan 18 11:33:40 managed-node1 systemd[1]: Created slice machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice - cgroup machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice. ░░ Subject: A start job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished successfully. ░░ ░░ The job identifier is 2372. Jan 18 11:33:40 managed-node1 podman[14724]: 2025-01-18 11:33:40.867584006 -0500 EST m=+0.076424916 container create 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jan 18 11:33:40 managed-node1 podman[14724]: 2025-01-18 11:33:40.873786077 -0500 EST m=+0.082626984 pod create fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod) Jan 18 11:33:40 managed-node1 quadlet-pod-pod-pod[14724]: fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 Jan 18 11:33:40 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9275] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/7) Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:40 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:33:40 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:33:40 managed-node1 (udev-worker)[14645]: Network interface NamePolicy= disabled on kernel command line. 
Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:40 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9424] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/8) Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9473] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9479] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9485] device (podman0): Activation: starting connection 'podman0' (f19726fd-c620-48a6-81ec-88aca5176477) Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9496] device (veth0): carrier: link connected Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9501] device (podman0): carrier: link connected Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9504] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9510] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9513] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9517] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9564] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9615] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:33:40 managed-node1 NetworkManager[711]: [1737218020.9620] device (podman0): Activation: successful, device activated. Jan 18 11:33:41 managed-node1 systemd[1]: Started libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope - libcrun container. ░░ Subject: A start job for unit libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope has finished successfully. ░░ ░░ The job identifier is 2378. 
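Each pod start and stop in this log creates and tears down a podman0 bridge plus a veth0 peer, which NetworkManager tracks as externally managed devices (the unmanaged -> unavailable -> disconnected -> ... -> activated transitions above). To watch that churn on the host, something along these lines could be used; nmcli and ip are standard host tools, not part of the test:

  - name: Inspect container network devices (diagnostic sketch)
    ansible.builtin.command: "{{ item }}"
    loop:
      - nmcli -f DEVICE,TYPE,STATE device status
      - ip -br link show type bridge
    changed_when: false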
Jan 18 11:33:41 managed-node1 podman[14733]: 2025-01-18 11:33:41.024075225 -0500 EST m=+0.130462979 container init 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 podman[14733]: 2025-01-18 11:33:41.026932567 -0500 EST m=+0.133320364 container start 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jan 18 11:33:41 managed-node1 systemd[1]: libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope has successfully entered the 'dead' state. Jan 18 11:33:41 managed-node1 conmon[14759]: conmon 71d159e1b2eba1445354 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice/libpod-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de.scope/container/memory.events Jan 18 11:33:41 managed-node1 podman[14733]: 2025-01-18 11:33:41.033064726 -0500 EST m=+0.139452443 pod start fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod) Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14733]: quadlet-pod Jan 18 11:33:41 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 2288. 
Jan 18 11:33:41 managed-node1 podman[14763]: 2025-01-18 11:33:41.067125334 -0500 EST m=+0.024761993 container died 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.1029] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:33:41 managed-node1 podman[14763]: 2025-01-18 11:33:41.150359966 -0500 EST m=+0.107996812 container cleanup 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 podman[14763]: 2025-01-18 11:33:41.151414399 -0500 EST m=+0.109051037 pod stop fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod) Jan 18 11:33:41 managed-node1 systemd[1]: Removed slice machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice - cgroup machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice. ░░ Subject: A stop job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692.slice has finished. ░░ ░░ The job identifier is 2385 and the job result is done. Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 1. Jan 18 11:33:41 managed-node1 podman[14787]: 2025-01-18 11:33:41.2547741 -0500 EST m=+0.062974793 container remove 71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 podman[14787]: 2025-01-18 11:33:41.268902616 -0500 EST m=+0.077103294 pod remove fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 (image=, name=quadlet-pod) Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14787]: fef92eb2cb8895d10542d315a41aa0884b2a10273dd000c7a5ee4f7241819692 Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. 
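The failure sequence above has now repeated twice with identical shape: the quadlet-pod-infra pause container starts, exits within a few tens of milliseconds, podman stops and removes the pod, the service's ExecStart process exits with status 1, and the unit's Restart= setting schedules another attempt. Were this loop being debugged interactively, tasks like the following could capture the unit state (a diagnostic sketch, not part of the test run):

  - name: Capture status of the failing quadlet service (diagnostic sketch)
    ansible.builtin.command: systemctl status quadlet-pod-pod-pod.service --no-pager
    register: quadlet_status
    failed_when: false

  - name: Collect recent journal entries for the unit
    ansible.builtin.command: journalctl -u quadlet-pod-pod-pod.service -n 50 --no-pager
    register: quadlet_journal
    failed_when: false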
Jan 18 11:33:41 managed-node1 python3.12[14906]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 2. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Jan 18 11:33:41 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2387. Jan 18 11:33:41 managed-node1 systemd[1]: Created slice machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice - cgroup machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice. ░░ Subject: A start job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished successfully. ░░ ░░ The job identifier is 2471. Jan 18 11:33:41 managed-node1 podman[14930]: 2025-01-18 11:33:41.67679635 -0500 EST m=+0.071343444 container create cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 podman[14930]: 2025-01-18 11:33:41.683125506 -0500 EST m=+0.077672604 pod create 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod) Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14930]: 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7309] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/9) Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:41 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:33:41 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7401] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/10) Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7413] device (veth0): carrier: link connected Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7415] device (podman0): carrier: link connected Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7619] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7626] 
device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7634] device (podman0): Activation: starting connection 'podman0' (578d14fd-34f6-4f03-9983-1048994dadcb) Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7639] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7642] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7645] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7649] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7709] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7710] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.7716] device (podman0): Activation: successful, device activated. Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-71d159e1b2eba1445354a682ab267c12b7ea2f67918a5cad6861c85c697e28de-userdata-shm.mount has successfully entered the 'dead' state. Jan 18 11:33:41 managed-node1 systemd[1]: Started libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope - libcrun container. ░░ Subject: A start job for unit libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope has finished successfully. ░░ ░░ The job identifier is 2477. Jan 18 11:33:41 managed-node1 podman[14938]: 2025-01-18 11:33:41.833984995 -0500 EST m=+0.133229395 container init cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 podman[14938]: 2025-01-18 11:33:41.836860526 -0500 EST m=+0.136105046 container start cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 systemd[1]: libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2.scope has successfully entered the 'dead' state. Jan 18 11:33:41 managed-node1 podman[14938]: 2025-01-18 11:33:41.843610464 -0500 EST m=+0.142854806 pod start 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod) Jan 18 11:33:41 managed-node1 quadlet-pod-pod-pod[14938]: quadlet-pod Jan 18 11:33:41 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 2387. Jan 18 11:33:41 managed-node1 podman[14968]: 2025-01-18 11:33:41.875395285 -0500 EST m=+0.023750684 container died cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:33:41 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:33:41 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:41 managed-node1 NetworkManager[711]: [1737218021.9070] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:33:41 managed-node1 systemd[1]: run-netns-netns\x2d6264de46\x2d32a9\x2d432c\x2d487b\x2d4419c043634d.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d6264de46\x2d32a9\x2d432c\x2d487b\x2d4419c043634d.mount has successfully entered the 'dead' state. Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2-userdata-shm.mount has successfully entered the 'dead' state. Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay-c1e9162aeff1201990e8d156c1a1e46da6d3b4693c3f06eef70039638e5e918c-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-c1e9162aeff1201990e8d156c1a1e46da6d3b4693c3f06eef70039638e5e918c-merged.mount has successfully entered the 'dead' state. 
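The same create/start/die sequence now repeats: each time the infra container exits, systemd applies the unit's Restart= policy and schedules another attempt, incrementing the restart counter. A sketch for inspecting the policy and counters being applied (standard systemd properties; the unit name is from the log):

    # Restart policy, delay, restarts so far, and the start rate limit
    systemctl show quadlet-pod-pod-pod.service \
        -p Restart -p RestartUSec -p NRestarts \
        -p StartLimitBurst -p StartLimitIntervalUSec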
Jan 18 11:33:41 managed-node1 podman[14968]: 2025-01-18 11:33:41.953694779 -0500 EST m=+0.102050647 container cleanup cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:41 managed-node1 podman[14968]: 2025-01-18 11:33:41.955418494 -0500 EST m=+0.103773946 pod stop 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod) Jan 18 11:33:41 managed-node1 systemd[1]: Removed slice machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice - cgroup machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice. ░░ Subject: A stop job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746.slice has finished. ░░ ░░ The job identifier is 2484 and the job result is done. Jan 18 11:33:41 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:41 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 1. Jan 18 11:33:42 managed-node1 podman[14991]: 2025-01-18 11:33:42.044406496 -0500 EST m=+0.050948258 container remove cc87a80782819afba977965f28149532f75c8f54e7cab9c4ee626de0ef6a7ed2 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jan 18 11:33:42 managed-node1 podman[14991]: 2025-01-18 11:33:42.056292467 -0500 EST m=+0.062834203 pod remove 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 (image=, name=quadlet-pod) Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[14991]: 5ab50875b857c86984e530ce1ccb1ac61c717426472b5127fe4eee7986577746 Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 3. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Jan 18 11:33:42 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... 
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2486. Jan 18 11:33:42 managed-node1 systemd[1]: Created slice machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice - cgroup machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice. ░░ Subject: A start job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished successfully. ░░ ░░ The job identifier is 2570. Jan 18 11:33:42 managed-node1 podman[15002]: 2025-01-18 11:33:42.428345618 -0500 EST m=+0.080049535 container create bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:42 managed-node1 podman[15002]: 2025-01-18 11:33:42.434625701 -0500 EST m=+0.086329508 pod create e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod) Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[15002]: e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:42 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:33:42 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.4932] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/11) Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.4974] device (podman0): carrier: link connected Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.4984] device (veth0): carrier: link connected Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.4989] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/12) Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5085] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5093] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5100] device (podman0): Activation: starting connection 'podman0' (f8555d8f-9dd1-4a6b-8a25-8e0e9826b378) Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5103] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5105] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: 
[1737218022.5114] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5125] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5155] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5163] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.5192] device (podman0): Activation: successful, device activated. Jan 18 11:33:42 managed-node1 systemd[1]: Started libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope - libcrun container. ░░ Subject: A start job for unit libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope has finished successfully. ░░ ░░ The job identifier is 2576. Jan 18 11:33:42 managed-node1 podman[15010]: 2025-01-18 11:33:42.577700529 -0500 EST m=+0.123417871 container init bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:42 managed-node1 podman[15010]: 2025-01-18 11:33:42.58088775 -0500 EST m=+0.126605192 container start bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:42 managed-node1 systemd[1]: libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope has successfully entered the 'dead' state. Jan 18 11:33:42 managed-node1 conmon[15038]: conmon bd7e15cdccba04722af7 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice/libpod-bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826.scope/container/memory.events Jan 18 11:33:42 managed-node1 podman[15010]: 2025-01-18 11:33:42.587145651 -0500 EST m=+0.132862945 pod start e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod) Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[15010]: quadlet-pod Jan 18 11:33:42 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 2486. 
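The conmon complaint about memory.events usually means the container's cgroup was already torn down by the time conmon tried to read it, consistent with the infra container dying immediately. A sketch for checking what actually exists under machine.slice at that point (assuming cgroup v2, as the path in the message suggests):

    # List the live cgroup hierarchy for machine-managed pods/containers
    systemd-cgls /machine.slice --no-pager

    # A libpod pod slice only exists while the pod is up; absence here matches
    # the 'Removed slice machine-libpod_pod_...' messages in the journal
    ls /sys/fs/cgroup/machine.slice/ | grep libpod || true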
Jan 18 11:33:42 managed-node1 podman[15043]: 2025-01-18 11:33:42.620274963 -0500 EST m=+0.023752615 container died bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:42 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:33:42 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:33:42 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:42 managed-node1 NetworkManager[711]: [1737218022.6600] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:33:42 managed-node1 podman[15043]: 2025-01-18 11:33:42.703001322 -0500 EST m=+0.106478971 container cleanup bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:42 managed-node1 podman[15043]: 2025-01-18 11:33:42.704842889 -0500 EST m=+0.108320516 pod stop e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod) Jan 18 11:33:42 managed-node1 systemd[1]: Removed slice machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice - cgroup machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice. ░░ Subject: A stop job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c.slice has finished. ░░ ░░ The job identifier is 2583 and the job result is done. Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 1. Jan 18 11:33:42 managed-node1 podman[15066]: 2025-01-18 11:33:42.788883839 -0500 EST m=+0.051451334 container remove bd7e15cdccba04722af7ebc567492613e19c206a29f4f50542536c33119fc826 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:42 managed-node1 podman[15066]: 2025-01-18 11:33:42.799225016 -0500 EST m=+0.061792504 pod remove e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c (image=, name=quadlet-pod) Jan 18 11:33:42 managed-node1 quadlet-pod-pod-pod[15066]: e67d079b52a8a7feb3dbcc1e16e4ea2862714b175cc0bfd55ac3c27cf965666c Jan 18 11:33:42 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
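Each failed attempt ends with podman removing the infra container and the pod before systemd marks the unit failed, so no stale pod should survive; if one did, a manual cleanup along these lines would be the usual remedy (a sketch, not part of the test; names from the log):

    # Force-remove any leftover pod state, then clear systemd's failure record
    podman pod rm --force quadlet-pod || true
    systemctl reset-failed quadlet-pod-pod-pod.service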
Jan 18 11:33:42 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 4. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Jan 18 11:33:43 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2585. Jan 18 11:33:43 managed-node1 systemd[1]: Created slice machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice - cgroup machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice. ░░ Subject: A start job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished successfully. ░░ ░░ The job identifier is 2669. Jan 18 11:33:43 managed-node1 podman[15123]: 2025-01-18 11:33:43.177068791 -0500 EST m=+0.077884436 container create 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jan 18 11:33:43 managed-node1 podman[15123]: 2025-01-18 11:33:43.183286715 -0500 EST m=+0.084102413 pod create f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod) Jan 18 11:33:43 managed-node1 quadlet-pod-pod-pod[15123]: f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 Jan 18 11:33:43 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2500] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/13) Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:43 managed-node1 kernel: veth0: entered allmulticast mode Jan 18 11:33:43 managed-node1 kernel: veth0: entered promiscuous mode Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2670] device (podman0): carrier: link connected Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2675] device (veth0): carrier: link connected Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2719] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/14) Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2880] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2892] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2899] device (podman0): Activation: starting connection 'podman0' (cde6d46a-6303-4ae2-a932-44130c439952) Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2902] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2904] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2905] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.2909] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.3094] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.3096] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.3109] device (podman0): Activation: successful, device activated. Jan 18 11:33:43 managed-node1 systemd[1]: Started libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope - libcrun container. ░░ Subject: A start job for unit libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope has finished successfully. ░░ ░░ The job identifier is 2675. 
Jan 18 11:33:43 managed-node1 podman[15132]: 2025-01-18 11:33:43.363268029 -0500 EST m=+0.157799375 container init 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:43 managed-node1 podman[15132]: 2025-01-18 11:33:43.368080332 -0500 EST m=+0.162611411 container start 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:43 managed-node1 conmon[15209]: conmon 1ea9ccb136e01b3c0ce0 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice/libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope/container/memory.events Jan 18 11:33:43 managed-node1 systemd[1]: libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75.scope has successfully entered the 'dead' state. Jan 18 11:33:43 managed-node1 podman[15132]: 2025-01-18 11:33:43.378339606 -0500 EST m=+0.172870674 pod start f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod) Jan 18 11:33:43 managed-node1 quadlet-pod-pod-pod[15132]: quadlet-pod Jan 18 11:33:43 managed-node1 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 2585. 
Jan 18 11:33:43 managed-node1 podman[15227]: 2025-01-18 11:33:43.428553199 -0500 EST m=+0.039068776 container died 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:43 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Jan 18 11:33:43 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Jan 18 11:33:43 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Jan 18 11:33:43 managed-node1 NetworkManager[711]: [1737218023.5174] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jan 18 11:33:43 managed-node1 podman[15227]: 2025-01-18 11:33:43.574297477 -0500 EST m=+0.184812888 container cleanup 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service, io.buildah.version=1.38.0) Jan 18 11:33:43 managed-node1 podman[15227]: 2025-01-18 11:33:43.576291098 -0500 EST m=+0.186806507 pod stop f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod) Jan 18 11:33:43 managed-node1 systemd[1]: Removed slice machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice - cgroup machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice. ░░ Subject: A stop job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46.slice has finished. ░░ ░░ The job identifier is 2682 and the job result is done. Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Main process exited, code=exited, status=1/FAILURE ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 1. Jan 18 11:33:43 managed-node1 podman[15255]: 2025-01-18 11:33:43.694967472 -0500 EST m=+0.072643489 container remove 1ea9ccb136e01b3c0ce0adb6d632480a4583a02c6e6afdbe07e90acf624cbc75 (image=localhost/podman-pause:5.3.1-1733097600, name=quadlet-pod-infra, pod_id=f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46, io.buildah.version=1.38.0, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jan 18 11:33:43 managed-node1 podman[15255]: 2025-01-18 11:33:43.710883834 -0500 EST m=+0.088559423 pod remove f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 (image=, name=quadlet-pod) Jan 18 11:33:43 managed-node1 quadlet-pod-pod-pod[15255]: f9790cdb0611c39f613e9366218585edaab48009a540d12703e818c9f4896b46 Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. 
Jan 18 11:33:43 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 5. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Start request repeated too quickly. Jan 18 11:33:43 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Jan 18 11:33:43 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2684 and the job result is failed. Jan 18 11:33:47 managed-node1 podman[15262]: 2025-01-18 11:33:47.002680632 -0500 EST m=+3.361550536 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jan 18 11:33:47 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:47 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
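After the fifth failure inside the rate-limit window, systemd gives up with 'Start request repeated too quickly': the unit has hit its start rate limit, so even the scheduled restart is refused. Once the underlying infra-container failure is fixed, a retry would look like this sketch:

    # Confirm which limit was hit
    systemctl show quadlet-pod-pod-pod.service -p StartLimitBurst -p StartLimitIntervalUSec

    # Clear the failed/rate-limited state and try again
    systemctl reset-failed quadlet-pod-pod-pod.service
    systemctl start quadlet-pod-pod-pod.service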
Jan 18 11:33:47 managed-node1 python3.12[15431]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:33:47 managed-node1 python3.12[15562]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jan 18 11:33:48 managed-node1 python3.12[15667]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1737218027.6281447-19906-14489979928832/.source.container dest=/etc/containers/systemd/quadlet-pod-container.container owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jan 18 11:33:48 managed-node1 python3.12[15798]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jan 18 11:33:48 managed-node1 systemd[1]: Reload requested from client PID 15799 ('systemctl') (unit session-5.scope)... Jan 18 11:33:48 managed-node1 systemd[1]: Reloading... Jan 18 11:33:48 managed-node1 systemd-rc-local-generator[15844]: /etc/rc.d/rc.local is not marked executable, skipping. Jan 18 11:33:48 managed-node1 systemd-ssh-generator[15846]: Failed to query local AF_VSOCK CID: Permission denied Jan 18 11:33:48 managed-node1 (sd-exec-strv)[15819]: /usr/lib/systemd/system-generators/systemd-ssh-generator failed with exit status 1. Jan 18 11:33:49 managed-node1 systemd[1]: Reloading finished in 206 ms. Jan 18 11:33:49 managed-node1 python3.12[15984]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jan 18 11:33:49 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Start request repeated too quickly. Jan 18 11:33:49 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Jan 18 11:33:49 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2769 and the job result is failed. Jan 18 11:33:49 managed-node1 systemd[1]: Dependency failed for quadlet-pod-container.service. ░░ Subject: A start job for unit quadlet-pod-container.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-container.service has finished with a failure. ░░ ░░ The job identifier is 2768 and the job result is dependency.
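The container unit fails with result 'dependency' rather than on its own: quadlet ties a .container that declares Pod= to the generated pod unit, so quadlet-pod-container.service cannot start while quadlet-pod-pod-pod.service is failed. A sketch for confirming that linkage (the grep pattern is illustrative):

    # Show the generated unit and its ordering/requirement on the pod unit
    systemctl cat quadlet-pod-container.service --no-pager | grep -iE 'requires|bindsto|after'
    systemctl list-dependencies quadlet-pod-container.service --no-pager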
Jan 18 11:33:49 managed-node1 systemd[1]: quadlet-pod-container.service: Job quadlet-pod-container.service/start failed with result 'dependency'. Jan 18 11:33:50 managed-node1 python3.12[16118]: ansible-ansible.legacy.command Invoked with _raw_params=set -x
set -o pipefail
exec 1>&2
#podman volume rm --all
#podman network prune -f
podman volume ls
podman network ls
podman secret ls
podman container ls
podman pod ls
podman images
systemctl list-units | grep quadlet
systemctl list-unit-files | grep quadlet
ls -alrtF /etc/containers/systemd
/usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log
_uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:33:50 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:50 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:51 managed-node1 python3.12[16304]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:33:51 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jan 18 11:33:51 managed-node1 python3.12[16436]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:33:53 managed-node1 python3.12[16699]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jan 18 11:33:53 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.
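The debug task's grep of /var/log/audit/audit.log for type=AVC rules out SELinux denials as the cause. An equivalent check, sketched with ausearch for structured output where auditd is available:

    # Raw grep, as the test does, plus ausearch for recent AVC events
    grep type=AVC /var/log/audit/audit.log || true
    ausearch -m avc -ts recent || true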
Jan 18 11:33:54 managed-node1 python3.12[16837]: ansible-getent Invoked with database=passwd key=user_quadlet_pod fail_key=False service=None split=None Jan 18 11:33:54 managed-node1 python3.12[16969]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None

PLAY RECAP *********************************************************************
managed-node1              : ok=66   changed=4    unreachable=0    failed=2    skipped=87   rescued=2    ignored=0

TASKS RECAP ********************************************************************
Saturday 18 January 2025  11:33:54 -0500 (0:00:00.453)       0:00:29.895 ******
===============================================================================
fedora.linux_system_roles.podman : Ensure container images are present --- 4.13s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18
Gathering Facts --------------------------------------------------------- 1.41s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:9
fedora.linux_system_roles.podman : Gather the package facts ------------- 1.30s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Start service ------------------------ 1.27s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:110
fedora.linux_system_roles.podman : Reload systemctl --------------------- 1.26s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:82
fedora.linux_system_roles.podman : Ensure quadlet file is present ------- 1.03s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:70
Debug3 ------------------------------------------------------------------ 1.02s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:127
fedora.linux_system_roles.podman : Gather the package facts ------------- 0.89s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Ensure quadlet file is present ------- 0.81s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:70
fedora.linux_system_roles.podman : Reload systemctl --------------------- 0.78s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:82
fedora.linux_system_roles.podman : Ensure the quadlet directory is present --- 0.67s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39
fedora.linux_system_roles.podman : Start service ------------------------ 0.58s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:110
fedora.linux_system_roles.podman : Check if system is ostree ------------ 0.53s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11
fedora.linux_system_roles.podman : Get podman version ------------------- 0.52s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46
fedora.linux_system_roles.podman : Get user information ----------------- 0.49s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2
Dump journal ------------------------------------------------------------ 0.48s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:151
Dump journal ------------------------------------------------------------ 0.45s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:194
fedora.linux_system_roles.podman : See if getsubids exists -------------- 0.45s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31
fedora.linux_system_roles.podman : See if getsubids exists -------------- 0.44s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31
fedora.linux_system_roles.podman : Get user information ----------------- 0.44s
/tmp/collections-PRc/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2
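The per-task timing summary above comes from the ansible.posix.profile_tasks callback enabled for this run. A sketch for reproducing it on a local re-run (the inventory path is a placeholder):

    # Enable the timing callback for a single run; 'inventory' is hypothetical
    ANSIBLE_CALLBACKS_ENABLED=ansible.posix.profile_tasks \
        ansible-playbook -i inventory tests_quadlet_pod.yml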