ansible-playbook [core 2.17.12]
  config file = None
  configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/local/lib/python3.12/site-packages/ansible
  ansible collection location = /tmp/collections-AV4
  executable location = /usr/local/bin/ansible-playbook
  python version = 3.12.11 (main, Jun 4 2025, 00:00:00) [GCC 11.5.0 20240719 (Red Hat 11.5.0-7)] (/usr/bin/python3.12)
  jinja version = 3.1.6
  libyaml = True
No config file found; using defaults
running playbook inside collection fedora.linux_system_roles
Skipping callback 'debug', as we already have a stdout callback.
Skipping callback 'json', as we already have a stdout callback.
Skipping callback 'jsonl', as we already have a stdout callback.
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.

PLAYBOOK: tests_quadlet_pod.yml ************************************************
2 plays in /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml

PLAY [all] *********************************************************************

TASK [Include vault variables] *************************************************
task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:5
Monday 07 July 2025 20:16:32 -0400 (0:00:00.046) 0:00:00.046 ***********
ok: [managed-node2] => {
    "ansible_facts": {
        "__podman_test_password": {
            "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n35383939616163653333633431363463313831383037386236646138333162396161356130303461\n3932623930643263313563336163316337643562333936360a363538636631313039343233383732\n38666530383538656639363465313230343533386130303833336434303438333161656262346562\n3362626538613031640a663330613638366132356534363534353239616666653466353961323533\n6565\n"
        },
        "mysql_container_root_password": {
            "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n61333932373230333539663035366431326163363166363036323963623131363530326231303634\n6635326161643165363366323062333334363730376631660a393566366139353861656364656661\n38653463363837336639363032646433666361646535366137303464623261313663643336306465\n6264663730656337310a343962353137386238383064646533366433333437303566656433386233\n34343235326665646661623131643335313236313131353661386338343366316261643634653633\n3832313034366536616531323963333234326461353130303532\n"
        }
    },
    "ansible_included_var_files": [
        "/tmp/podman-b9i/tests/vars/vault-variables.yml"
    ],
    "changed": false
}

PLAY [Ensure that the role can manage quadlet pods] ****************************

TASK [Gathering Facts] *********************************************************
task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:9
Monday 07 July 2025 20:16:32 -0400 (0:00:00.029) 0:00:00.076 ***********
[WARNING]: Platform linux on host managed-node2 is using the discovered Python
interpreter at /usr/bin/python3.9, but future installation of another Python
interpreter could change the meaning of that path. See
https://docs.ansible.com/ansible-core/2.17/reference_appendices/interpreter_discovery.html
for more information.
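The warning above is informational: no interpreter was pinned for this run, so Ansible discovered /usr/bin/python3.9 on managed-node2. If the behavior should be made explicit (or the warning silenced), the interpreter policy can be set in an ansible.cfg; a minimal sketch, assuming a config file is introduced for the test environment (this run used none, per "config file = None" above):

    [defaults]
    # Pin the interpreter discovered in this run, or use "auto_silent" to keep
    # discovery but suppress the warning.
    interpreter_python = /usr/bin/python3.9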
ok: [managed-node2] TASK [Run the role - root] ***************************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:34 Monday 07 July 2025 20:16:33 -0400 (0:00:01.123) 0:00:01.199 *********** included: fedora.linux_system_roles.podman for managed-node2 TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3 Monday 07 July 2025 20:16:33 -0400 (0:00:00.101) 0:00:01.301 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3 Monday 07 July 2025 20:16:33 -0400 (0:00:00.040) 0:00:01.341 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if system is ostree] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11 Monday 07 July 2025 20:16:33 -0400 (0:00:00.056) 0:00:01.398 *********** ok: [managed-node2] => { "changed": false, "stat": { "exists": false } } TASK [fedora.linux_system_roles.podman : Set flag to indicate system is ostree] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16 Monday 07 July 2025 20:16:34 -0400 (0:00:00.454) 0:00:01.853 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_is_ostree": false }, "changed": false } TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23 Monday 07 July 2025 20:16:34 -0400 (0:00:00.024) 0:00:01.877 *********** ok: [managed-node2] => { "changed": false, "stat": { "exists": false } } TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28 Monday 07 July 2025 20:16:34 -0400 (0:00:00.352) 0:00:02.229 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_is_transactional": false }, "changed": false } TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32 Monday 07 July 2025 20:16:34 -0400 (0:00:00.023) 0:00:02.253 *********** ok: [managed-node2] => (item=RedHat.yml) => { "ansible_facts": { "__podman_packages": [ "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml" ], "ansible_loop_var": "item", "changed": false, "item": "RedHat.yml" } skipping: [managed-node2] => (item=CentOS.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, 
"false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Gather the package facts] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6 Monday 07 July 2025 20:16:34 -0400 (0:00:00.038) 0:00:02.291 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Enable copr if requested] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10 Monday 07 July 2025 20:16:35 -0400 (0:00:01.031) 0:00:03.322 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_use_copr | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14 Monday 07 July 2025 20:16:35 -0400 (0:00:00.055) 0:00:03.378 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "(__podman_packages | difference(ansible_facts.packages)) | list | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28 Monday 07 July 2025 20:16:35 -0400 (0:00:00.074) 0:00:03.453 *********** skipping: [managed-node2] => { "false_condition": "__podman_is_transactional | d(false)" } TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33 Monday 07 July 2025 20:16:35 -0400 (0:00:00.083) 0:00:03.536 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38 Monday 07 July 2025 20:16:35 -0400 (0:00:00.078) 0:00:03.615 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get podman version] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46 Monday 07 July 2025 20:16:35 -0400 (0:00:00.077) 0:00:03.692 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "podman", "--version" ], "delta": "0:00:00.027686", "end": "2025-07-07 20:16:36.433666", "rc": 0, "start": "2025-07-07 20:16:36.405980" } STDOUT: podman version 5.5.1 TASK [fedora.linux_system_roles.podman : Set podman version] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52 Monday 07 July 2025 20:16:36 -0400 (0:00:00.548) 
0:00:04.241 *********** ok: [managed-node2] => { "ansible_facts": { "podman_version": "5.5.1" }, "changed": false } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56 Monday 07 July 2025 20:16:36 -0400 (0:00:00.040) 0:00:04.282 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.2\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63 Monday 07 July 2025 20:16:36 -0400 (0:00:00.031) 0:00:04.313 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.4\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73 Monday 07 July 2025 20:16:36 -0400 (0:00:00.058) 0:00:04.372 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80 Monday 07 July 2025 20:16:36 -0400 (0:00:00.055) 0:00:04.427 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"5.0\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96 Monday 07 July 2025 20:16:36 -0400 (0:00:00.057) 0:00:04.485 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109 Monday 07 July 2025 20:16:36 -0400 (0:00:00.124) 0:00:04.609 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:16:36 -0400 (0:00:00.101) 0:00:04.710 *********** ok: [managed-node2] => { "ansible_facts": { "getent_passwd": { "root": [ "x", "0", "0", "root", "/root", "/bin/bash" ] } }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:16:37 -0400 (0:00:00.534) 0:00:05.245 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:16:37 -0400 (0:00:00.034) 0:00:05.279 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "0" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:16:37 -0400 (0:00:00.041) 0:00:05.321 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:16:37 -0400 (0:00:00.401) 0:00:05.722 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:16:38 -0400 (0:00:00.052) 0:00:05.774 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:16:38 -0400 (0:00:00.052) 0:00:05.827 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:16:38 -0400 (0:00:00.055) 0:00:05.882 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not 
__podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:16:38 -0400 (0:00:00.055) 0:00:05.938 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:16:38 -0400 (0:00:00.053) 0:00:05.991 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:16:38 -0400 (0:00:00.053) 0:00:06.045 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:16:38 -0400 (0:00:00.036) 0:00:06.081 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set config file paths] **************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:115 Monday 07 July 2025 20:16:38 -0400 (0:00:00.037) 0:00:06.118 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_container_conf_file": "/etc/containers/containers.conf.d/50-systemroles.conf", "__podman_parent_mode": "0755", "__podman_parent_path": "/etc/containers", "__podman_policy_json_file": "/etc/containers/policy.json", "__podman_registries_conf_file": "/etc/containers/registries.conf.d/50-systemroles.conf", "__podman_storage_conf_file": "/etc/containers/storage.conf" }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle container.conf.d] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:126 Monday 07 July 2025 20:16:38 -0400 (0:00:00.072) 0:00:06.191 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure containers.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:5 Monday 07 July 2025 20:16:38 -0400 (0:00:00.060) 0:00:06.251 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update container config file] ********* task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:13 Monday 07 July 2025 20:16:38 -0400 (0:00:00.031) 0:00:06.282 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle registries.conf.d] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:129 Monday 07 July 2025 20:16:38 -0400 (0:00:00.030) 0:00:06.313 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure registries.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:5 Monday 07 July 2025 20:16:38 -0400 (0:00:00.068) 0:00:06.381 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update registries config file] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:13 Monday 07 July 2025 20:16:38 -0400 (0:00:00.078) 0:00:06.459 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle storage.conf] ****************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:132 Monday 07 July 2025 20:16:38 -0400 (0:00:00.038) 0:00:06.498 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure storage.conf parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:7 Monday 07 July 2025 20:16:38 -0400 (0:00:00.072) 0:00:06.571 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update storage config file] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:15 Monday 07 July 2025 20:16:38 -0400 (0:00:00.031) 0:00:06.602 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle policy.json] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:135 Monday 07 July 2025 20:16:38 -0400 (0:00:00.032) 0:00:06.634 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure policy.json parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:8 Monday 07 July 2025 20:16:38 -0400 (0:00:00.060) 
0:00:06.695 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Stat the policy.json file] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:16 Monday 07 July 2025 20:16:38 -0400 (0:00:00.030) 0:00:06.726 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get the existing policy.json] ********* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:21 Monday 07 July 2025 20:16:39 -0400 (0:00:00.036) 0:00:06.762 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Write new policy.json file] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:27 Monday 07 July 2025 20:16:39 -0400 (0:00:00.050) 0:00:06.813 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage firewall for specified ports] ************************************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:141 Monday 07 July 2025 20:16:39 -0400 (0:00:00.050) 0:00:06.863 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_firewall | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage selinux for specified ports] ************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:148 Monday 07 July 2025 20:16:39 -0400 (0:00:00.035) 0:00:06.898 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_selinux_ports | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Keep track of users that need to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:155 Monday 07 July 2025 20:16:39 -0400 (0:00:00.039) 0:00:06.938 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_cancel_user_linger": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle certs.d files - present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:159 Monday 07 July 2025 20:16:39 -0400 (0:00:00.038) 0:00:06.977 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle credential files - present] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:168 Monday 07 July 2025 20:16:39 -0400 (0:00:00.032) 0:00:07.009 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : 
Handle secrets] *********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:177 Monday 07 July 2025 20:16:39 -0400 (0:00:00.029) 0:00:07.038 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Kubernetes specifications] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:184 Monday 07 July 2025 20:16:39 -0400 (0:00:00.027) 0:00:07.065 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Quadlet specifications] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:191 Monday 07 July 2025 20:16:39 -0400 (0:00:00.026) 0:00:07.092 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Monday 07 July 2025 20:16:39 -0400 (0:00:00.114) 0:00:07.207 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Pod": { "PodName": "quadlet-pod" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Monday 07 July 2025 20:16:39 -0400 (0:00:00.041) 0:00:07.248 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "created", "__podman_systemd_unit_scope": "", "__podman_user": "root" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Monday 07 July 2025 20:16:39 -0400 (0:00:00.039) 0:00:07.288 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Monday 07 July 2025 20:16:39 -0400 (0:00:00.029) 0:00:07.318 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-pod", "__podman_quadlet_type": "pod", "__podman_rootless": false }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57 Monday 07 July 2025 20:16:39 -0400 
(0:00:00.047) 0:00:07.366 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:16:39 -0400 (0:00:00.062) 0:00:07.428 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:16:39 -0400 (0:00:00.034) 0:00:07.463 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:16:39 -0400 (0:00:00.035) 0:00:07.498 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "0" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:16:39 -0400 (0:00:00.042) 0:00:07.540 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:16:40 -0400 (0:00:00.367) 0:00:07.908 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:16:40 -0400 (0:00:00.032) 0:00:07.940 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid 
and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:16:40 -0400 (0:00:00.032) 0:00:07.973 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:16:40 -0400 (0:00:00.029) 0:00:08.003 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:16:40 -0400 (0:00:00.032) 0:00:08.036 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:16:40 -0400 (0:00:00.031) 0:00:08.068 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:16:40 -0400 (0:00:00.031) 0:00:08.100 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:16:40 -0400 (0:00:00.031) 0:00:08.132 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Monday 07 July 2025 20:16:40 -0400 (0:00:00.033) 0:00:08.165 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [], "__podman_kube_yamls_raw": "", "__podman_service_name": "quadlet-pod-pod-pod.service", "__podman_systemd_scope": "system", "__podman_user_home_dir": "/root", "__podman_xdg_runtime_dir": "/run/user/0" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Monday 07 July 2025 20:16:40 -0400 (0:00:00.052) 0:00:08.217 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_path": "/etc/containers/systemd" }, "changed": 
false } TASK [fedora.linux_system_roles.podman : Get kube yaml contents] *************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77 Monday 07 July 2025 20:16:40 -0400 (0:00:00.034) 0:00:08.252 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_kube_yamls_raw | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:88 Monday 07 July 2025 20:16:40 -0400 (0:00:00.033) 0:00:08.286 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_images": [], "__podman_quadlet_file": "/etc/containers/systemd/quadlet-pod-pod.pod", "__podman_volumes": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:106 Monday 07 July 2025 20:16:40 -0400 (0:00:00.113) 0:00:08.399 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Cleanup quadlets] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:113 Monday 07 July 2025 20:16:40 -0400 (0:00:00.038) 0:00:08.438 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_state == \"absent\"", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create and update quadlets] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:117 Monday 07 July 2025 20:16:40 -0400 (0:00:00.030) 0:00:08.468 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Manage linger] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:2 Monday 07 July 2025 20:16:40 -0400 (0:00:00.070) 0:00:08.538 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Enable linger if needed] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:12 Monday 07 July 2025 20:16:40 -0400 (0:00:00.053) 0:00:08.592 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Mark user as not yet needing to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:18 Monday 07 July 2025 20:16:40 -0400 (0:00:00.031) 0:00:08.623 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Mark user for possible linger cancel] *** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:22 Monday 07 July 2025 20:16:40 -0400 (0:00:00.030) 0:00:08.653 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create host directories] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:7 Monday 07 July 2025 20:16:40 -0400 (0:00:00.030) 0:00:08.684 *********** skipping: [managed-node2] => { "changed": false, "skipped_reason": "No items in the list" } TASK [fedora.linux_system_roles.podman : Ensure container images are present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18 Monday 07 July 2025 20:16:40 -0400 (0:00:00.027) 0:00:08.712 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Ensure the quadlet directory is present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39 Monday 07 July 2025 20:16:41 -0400 (0:00:00.031) 0:00:08.743 *********** ok: [managed-node2] => { "changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/etc/containers/systemd", "secontext": "system_u:object_r:etc_t:s0", "size": 6, "state": "directory", "uid": 0 } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is copied] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:50 Monday 07 July 2025 20:16:41 -0400 (0:00:00.462) 0:00:09.206 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_file_src | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file content is present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:62 Monday 07 July 2025 20:16:41 -0400 (0:00:00.034) 0:00:09.241 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_str | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75 Monday 07 July 2025 20:16:41 -0400 (0:00:00.033) 0:00:09.274 *********** changed: [managed-node2] => { "changed": true, "checksum": "1884c880482430d8bf2e944b003734fb8b7a462d", "dest": "/etc/containers/systemd/quadlet-pod-pod.pod", "gid": 0, "group": "root", "md5sum": "43c9e9c2ff3ad9cd27c1f2d12f03aee0", "mode": "0644", "owner": "root", "secontext": "system_u:object_r:etc_t:s0", "size": 70, "src": "/root/.ansible/tmp/ansible-tmp-1751933801.5835004-19965-109711770661066/.source.pod", "state": "file", "uid": 0 } TASK [fedora.linux_system_roles.podman : Reload systemctl] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:87 Monday 07 July 2025 20:16:42 -0400 (0:00:00.762) 0:00:10.036 *********** 
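For reference, the file written above (/etc/containers/systemd/quadlet-pod-pod.pod, 70 bytes) is rendered from the quadlet spec {"Pod": {"PodName": "quadlet-pod"}} shown earlier in this play. Its contents are not printed in the log; a minimal sketch of what the unit likely holds (the 70-byte size suggests the role also writes a short header comment, which is an assumption here):

    # header comment written by the role (assumed, not shown in the log)
    [Pod]
    PodName=quadlet-pod

The systemd quadlet generator translates this unit into the quadlet-pod-pod-pod.service seen in the Start service task that follows, whose ExecStartPre/ExecStart lines create and start a pod named quadlet-pod.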
ok: [managed-node2] => { "changed": false, "name": null, "status": {} } TASK [fedora.linux_system_roles.podman : Start service] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:115 Monday 07 July 2025 20:16:43 -0400 (0:00:00.913) 0:00:10.950 *********** changed: [managed-node2] => { "changed": true, "name": "quadlet-pod-pod-pod.service", "state": "started", "status": { "AccessSELinuxContext": "system_u:object_r:systemd_unit_file_t:s0", "ActiveEnterTimestampMonotonic": "0", "ActiveExitTimestampMonotonic": "0", "ActiveState": "inactive", "After": "sysinit.target basic.target systemd-journald.socket system.slice network-online.target -.mount", "AllowIsolate": "no", "AssertResult": "no", "AssertTimestampMonotonic": "0", "Before": "shutdown.target", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "yes", "CPUAffinityFromNUMA": "no", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "[not set]", "CPUUsageNSec": "[not set]", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanFreeze": "yes", "CanIsolate": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read cap_perfmon cap_bpf cap_checkpoint_restore", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "no", "ConditionTimestampMonotonic": "0", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroupId": "0", "ControlPID": "0", "CoredumpFilter": "0x33", "DefaultDependencies": "yes", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "Delegate": "no", "Description": "quadlet-pod-pod-pod.service", "DevicePolicy": "auto", "DynamicUser": "no", "Environment": "PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainPID": "0", "ExecMainStartTimestampMonotonic": "0", "ExecMainStatus": "0", "ExecStart": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod start --pod-id-file=/run/quadlet-pod-pod-pod.pod-id ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod start --pod-id-file=/run/quadlet-pod-pod-pod.pod-id ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartPre": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod create --infra-conmon-pidfile=/run/quadlet-pod-pod-pod.pid --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartPreEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod create --infra-conmon-pidfile=/run/quadlet-pod-pod-pod.pid --pod-id-file=/run/quadlet-pod-pod-pod.pod-id 
--exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStop": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod stop --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --time=10 ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod stop --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --time=10 ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPost": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod rm --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --force ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPostEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod rm --pod-id-file=/run/quadlet-pod-pod-pod.pod-id --ignore --force ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExitType": "main", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FinalKillSignal": "9", "FragmentPath": "/run/systemd/generator/quadlet-pod-pod-pod.service", "FreezerState": "running", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": "18446744073709551615", "IOReadOperations": "18446744073709551615", "IOSchedulingClass": "2", "IOSchedulingPriority": "4", "IOWeight": "[not set]", "IOWriteBytes": "18446744073709551615", "IOWriteOperations": "18446744073709551615", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "quadlet-pod-pod-pod.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestampMonotonic": "0", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "private", "KillMode": "control-group", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": "infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "8388608", "LimitMEMLOCKSoft": "8388608", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "524288", "LimitNOFILESoft": "1024", "LimitNPROC": "13688", "LimitNPROCSoft": "13688", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "13688", "LimitSIGPENDINGSoft": "13688", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "0", "ManagedOOMMemoryPressure": "auto", "ManagedOOMMemoryPressureLimit": "0", "ManagedOOMPreference": "none", "ManagedOOMSwap": "auto", "MemoryAccounting": "yes", "MemoryAvailable": "infinity", "MemoryCurrent": "[not set]", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryLimit": "infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemorySwapMax": "infinity", "MountAPIVFS": "no", "NFileDescriptorStore": "0", "NRestarts": "0", 
"NUMAPolicy": "n/a", "Names": "quadlet-pod-pod-pod.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "none", "OOMPolicy": "stop", "OOMScoreAdjust": "0", "OnFailureJobMode": "replace", "OnSuccessJobMode": "fail", "PIDFile": "/run/quadlet-pod-pod-pod.pid", "Perpetual": "no", "PrivateDevices": "no", "PrivateIPC": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "PrivateUsers": "no", "ProcSubset": "all", "ProtectClock": "no", "ProtectControlGroups": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelLogs": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectProc": "default", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "ReloadSignal": "1", "RemainAfterExit": "no", "RemoveIPC": "no", "Requires": "-.mount sysinit.target system.slice", "RequiresMountsFor": "/run/containers", "Restart": "on-failure", "RestartKillSignal": "15", "RestartUSec": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "success", "RootDirectoryStartOnly": "no", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "RuntimeRandomizedExtraUSec": "0", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "system.slice", "SourcePath": "/etc/containers/systemd/quadlet-pod-pod.pod", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": "[not set]", "StartupIOWeight": "[not set]", "StateChangeTimestampMonotonic": "0", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "dead", "SuccessAction": "none", "SyslogFacility": "3", "SyslogIdentifier": "quadlet-pod-pod-pod", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "SystemCallErrorNumber": "2147483646", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "[not set]", "TasksMax": "21900", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartFailureMode": "terminate", "TimeoutStartUSec": "1min 30s", "TimeoutStopFailureMode": "terminate", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "forking", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "generated", "UtmpMode": "init", "Wants": "network-online.target", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "infinity" } } TASK [fedora.linux_system_roles.podman : Restart service] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:131 Monday 07 July 2025 20:16:44 -0400 (0:00:00.859) 0:00:11.809 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_service_started is changed", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Monday 07 July 2025 20:16:44 -0400 (0:00:00.032) 0:00:11.842 *********** ok: [managed-node2] => { "ansible_facts": { 
"__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Container": { "ContainerName": "quadlet-pod-container", "Exec": "/bin/busybox-extras httpd -f -p 80", "Image": "quay.io/libpod/testimage:20210610", "Pod": "quadlet-pod-pod.pod" }, "Install": { "WantedBy": "default.target" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Monday 07 July 2025 20:16:44 -0400 (0:00:00.042) 0:00:11.885 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "created", "__podman_systemd_unit_scope": "", "__podman_user": "root" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Monday 07 July 2025 20:16:44 -0400 (0:00:00.039) 0:00:11.924 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Monday 07 July 2025 20:16:44 -0400 (0:00:00.070) 0:00:11.995 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-container", "__podman_quadlet_type": "container", "__podman_rootless": false }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57 Monday 07 July 2025 20:16:44 -0400 (0:00:00.049) 0:00:12.044 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:16:44 -0400 (0:00:00.061) 0:00:12.105 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:16:44 -0400 (0:00:00.034) 0:00:12.140 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:16:44 -0400 (0:00:00.036) 0:00:12.177 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "0" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] 
************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:16:44 -0400 (0:00:00.044) 0:00:12.221 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:16:44 -0400 (0:00:00.371) 0:00:12.592 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:16:44 -0400 (0:00:00.031) 0:00:12.624 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:16:44 -0400 (0:00:00.034) 0:00:12.658 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:16:44 -0400 (0:00:00.031) 0:00:12.690 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:16:44 -0400 (0:00:00.033) 0:00:12.723 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:16:45 -0400 (0:00:00.032) 0:00:12.756 *********** skipping: 
[managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:16:45 -0400 (0:00:00.032) 0:00:12.788 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:16:45 -0400 (0:00:00.031) 0:00:12.819 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Monday 07 July 2025 20:16:45 -0400 (0:00:00.035) 0:00:12.855 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [ "quay.io/libpod/testimage:20210610" ], "__podman_kube_yamls_raw": "", "__podman_service_name": "quadlet-pod-container.service", "__podman_systemd_scope": "system", "__podman_user_home_dir": "/root", "__podman_xdg_runtime_dir": "/run/user/0" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Monday 07 July 2025 20:16:45 -0400 (0:00:00.054) 0:00:12.909 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_path": "/etc/containers/systemd" }, "changed": false } TASK [fedora.linux_system_roles.podman : Get kube yaml contents] *************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77 Monday 07 July 2025 20:16:45 -0400 (0:00:00.037) 0:00:12.947 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_kube_yamls_raw | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:88 Monday 07 July 2025 20:16:45 -0400 (0:00:00.034) 0:00:12.981 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_images": [ "quay.io/libpod/testimage:20210610" ], "__podman_quadlet_file": "/etc/containers/systemd/quadlet-pod-container.container", "__podman_volumes": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:106 Monday 07 July 2025 20:16:45 -0400 (0:00:00.077) 0:00:13.059 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Cleanup quadlets] ********************* task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:113 Monday 07 July 2025 20:16:45 -0400 (0:00:00.038) 0:00:13.097 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_state == \"absent\"", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create and update quadlets] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:117 Monday 07 July 2025 20:16:45 -0400 (0:00:00.068) 0:00:13.166 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Manage linger] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:2 Monday 07 July 2025 20:16:45 -0400 (0:00:00.067) 0:00:13.233 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Enable linger if needed] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:12 Monday 07 July 2025 20:16:45 -0400 (0:00:00.053) 0:00:13.287 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Mark user as not yet needing to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:18 Monday 07 July 2025 20:16:45 -0400 (0:00:00.030) 0:00:13.318 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Mark user for possible linger cancel] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:22 Monday 07 July 2025 20:16:45 -0400 (0:00:00.032) 0:00:13.351 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_rootless | bool", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create host directories] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:7 Monday 07 July 2025 20:16:45 -0400 (0:00:00.030) 0:00:13.381 *********** skipping: [managed-node2] => { "changed": false, "skipped_reason": "No items in the list" } TASK [fedora.linux_system_roles.podman : Ensure container images are present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18 Monday 07 July 2025 20:16:45 -0400 (0:00:00.030) 0:00:13.412 *********** ok: [managed-node2] => (item=None) => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Ensure the quadlet directory is present] *** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39 Monday 07 July 2025 20:16:46 -0400 (0:00:01.082) 0:00:14.494 *********** ok: [managed-node2] => { "changed": false, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/etc/containers/systemd", "secontext": "system_u:object_r:etc_t:s0", "size": 33, "state": "directory", "uid": 0 } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is copied] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:50 Monday 07 July 2025 20:16:47 -0400 (0:00:00.371) 0:00:14.866 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_file_src | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file content is present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:62 Monday 07 July 2025 20:16:47 -0400 (0:00:00.032) 0:00:14.898 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_str | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75 Monday 07 July 2025 20:16:47 -0400 (0:00:00.034) 0:00:14.933 *********** changed: [managed-node2] => { "changed": true, "checksum": "f0b5c8159fc3c65bf9310a371751609e4c1ba4c3", "dest": "/etc/containers/systemd/quadlet-pod-container.container", "gid": 0, "group": "root", "md5sum": "daaf6e904ff3c17edeb801084cfe256f", "mode": "0644", "owner": "root", "secontext": "system_u:object_r:etc_t:s0", "size": 230, "src": "/root/.ansible/tmp/ansible-tmp-1751933807.2449324-20069-121688553430320/.source.container", "state": "file", "uid": 0 } TASK [fedora.linux_system_roles.podman : Reload systemctl] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:87 Monday 07 July 2025 20:16:47 -0400 (0:00:00.690) 0:00:15.624 *********** ok: [managed-node2] => { "changed": false, "name": null, "status": {} } TASK [fedora.linux_system_roles.podman : Start service] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:115 Monday 07 July 2025 20:16:48 -0400 (0:00:00.686) 0:00:16.310 *********** changed: [managed-node2] => { "changed": true, "name": "quadlet-pod-container.service", "state": "started", "status": { "AccessSELinuxContext": "system_u:object_r:systemd_unit_file_t:s0", "ActiveEnterTimestampMonotonic": "0", "ActiveExitTimestampMonotonic": "0", "ActiveState": "inactive", "After": "network-online.target systemd-journald.socket quadlet-pod-pod-pod.service system.slice -.mount basic.target sysinit.target", "AllowIsolate": "no", "AssertResult": "no", "AssertTimestampMonotonic": "0", "Before": "shutdown.target multi-user.target", "BindsTo": "quadlet-pod-pod-pod.service", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "yes", "CPUAffinityFromNUMA": "no", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", 
"CPUShares": "[not set]", "CPUUsageNSec": "[not set]", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanFreeze": "yes", "CanIsolate": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read cap_perfmon cap_bpf cap_checkpoint_restore", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "no", "ConditionTimestampMonotonic": "0", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroupId": "0", "ControlPID": "0", "CoredumpFilter": "0x33", "DefaultDependencies": "yes", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "Delegate": "yes", "DelegateControllers": "cpu cpuacct cpuset io blkio memory devices pids bpf-firewall bpf-devices bpf-foreign bpf-socket-bind bpf-restrict-network-interfaces", "Description": "quadlet-pod-container.service", "DevicePolicy": "auto", "DynamicUser": "no", "Environment": "PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainPID": "0", "ExecMainStartTimestampMonotonic": "0", "ExecMainStatus": "0", "ExecStart": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStop": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/quadlet-pod-container.cid ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/quadlet-pod-container.cid ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPost": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/quadlet-pod-container.cid ; ignore_errors=yes ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPostEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/quadlet-pod-container.cid ; flags=ignore-failure ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExitType": "main", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FinalKillSignal": "9", "FragmentPath": "/run/systemd/generator/quadlet-pod-container.service", "FreezerState": "running", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": 
"18446744073709551615", "IOReadOperations": "18446744073709551615", "IOSchedulingClass": "2", "IOSchedulingPriority": "4", "IOWeight": "[not set]", "IOWriteBytes": "18446744073709551615", "IOWriteOperations": "18446744073709551615", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "quadlet-pod-container.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestampMonotonic": "0", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "private", "KillMode": "mixed", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": "infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "8388608", "LimitMEMLOCKSoft": "8388608", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "524288", "LimitNOFILESoft": "1024", "LimitNPROC": "13688", "LimitNPROCSoft": "13688", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "13688", "LimitSIGPENDINGSoft": "13688", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "0", "ManagedOOMMemoryPressure": "auto", "ManagedOOMMemoryPressureLimit": "0", "ManagedOOMPreference": "none", "ManagedOOMSwap": "auto", "MemoryAccounting": "yes", "MemoryAvailable": "infinity", "MemoryCurrent": "[not set]", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryLimit": "infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemorySwapMax": "infinity", "MountAPIVFS": "no", "NFileDescriptorStore": "0", "NRestarts": "0", "NUMAPolicy": "n/a", "Names": "quadlet-pod-container.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "all", "OOMPolicy": "continue", "OOMScoreAdjust": "0", "OnFailureJobMode": "replace", "OnSuccessJobMode": "fail", "Perpetual": "no", "PrivateDevices": "no", "PrivateIPC": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "PrivateUsers": "no", "ProcSubset": "all", "ProtectClock": "no", "ProtectControlGroups": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelLogs": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectProc": "default", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "ReloadSignal": "1", "RemainAfterExit": "no", "RemoveIPC": "no", "Requires": "system.slice -.mount sysinit.target", "RequiresMountsFor": "/run/containers", "Restart": "no", "RestartKillSignal": "15", "RestartUSec": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "success", "RootDirectoryStartOnly": "no", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "RuntimeRandomizedExtraUSec": "0", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": 
"system.slice", "SourcePath": "/etc/containers/systemd/quadlet-pod-container.container", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": "[not set]", "StartupIOWeight": "[not set]", "StateChangeTimestampMonotonic": "0", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "dead", "SuccessAction": "none", "SyslogFacility": "3", "SyslogIdentifier": "quadlet-pod-container", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "SystemCallErrorNumber": "2147483646", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "[not set]", "TasksMax": "21900", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartFailureMode": "terminate", "TimeoutStartUSec": "1min 30s", "TimeoutStopFailureMode": "terminate", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "notify", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "generated", "UtmpMode": "init", "WantedBy": "multi-user.target quadlet-pod-pod-pod.service", "Wants": "network-online.target", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "infinity" } } TASK [fedora.linux_system_roles.podman : Restart service] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:131 Monday 07 July 2025 20:16:49 -0400 (0:00:00.624) 0:00:16.934 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_service_started is changed", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Cancel linger] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:198 Monday 07 July 2025 20:16:49 -0400 (0:00:00.032) 0:00:16.967 *********** skipping: [managed-node2] => { "changed": false, "skipped_reason": "No items in the list" } TASK [fedora.linux_system_roles.podman : Handle credential files - absent] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:204 Monday 07 July 2025 20:16:49 -0400 (0:00:00.028) 0:00:16.995 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle certs.d files - absent] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:213 Monday 07 July 2025 20:16:49 -0400 (0:00:00.026) 0:00:17.022 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [Check files] ************************************************************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:40 Monday 07 July 2025 20:16:49 -0400 (0:00:00.044) 0:00:17.066 *********** ok: [managed-node2] => (item=quadlet-pod-container.container) => { "ansible_loop_var": "item", "changed": false, "cmd": [ "cat", "/etc/containers/systemd/quadlet-pod-container.container" ], "delta": 
"0:00:00.003540", "end": "2025-07-07 20:16:49.626687", "item": "quadlet-pod-container.container", "rc": 0, "start": "2025-07-07 20:16:49.623147" } STDOUT: # # Ansible managed # # system_role:podman [Install] WantedBy=default.target [Container] Image=quay.io/libpod/testimage:20210610 ContainerName=quadlet-pod-container Pod=quadlet-pod-pod.pod Exec=/bin/busybox-extras httpd -f -p 80 ok: [managed-node2] => (item=quadlet-pod-pod.pod) => { "ansible_loop_var": "item", "changed": false, "cmd": [ "cat", "/etc/containers/systemd/quadlet-pod-pod.pod" ], "delta": "0:00:00.003234", "end": "2025-07-07 20:16:49.954098", "item": "quadlet-pod-pod.pod", "rc": 0, "start": "2025-07-07 20:16:49.950864" } STDOUT: # # Ansible managed # # system_role:podman [Pod] PodName=quadlet-pod TASK [Check pod] *************************************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:49 Monday 07 July 2025 20:16:50 -0400 (0:00:00.726) 0:00:17.793 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "podman", "pod", "inspect", "quadlet-pod", "--format", "{{range .Containers}}{{.Name}}\n{{end}}" ], "delta": "0:00:00.038845", "end": "2025-07-07 20:16:50.388559", "failed_when_result": false, "rc": 0, "start": "2025-07-07 20:16:50.349714" } STDOUT: quadlet-pod-infra quadlet-pod-container TASK [Create user for testing] ************************************************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:57 Monday 07 July 2025 20:16:50 -0400 (0:00:00.397) 0:00:18.190 *********** changed: [managed-node2] => { "changed": true, "comment": "", "create_home": true, "group": 2223, "home": "/home/user_quadlet_pod", "name": "user_quadlet_pod", "shell": "/bin/bash", "state": "present", "system": false, "uid": 2223 } TASK [Run the role - user] ***************************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:62 Monday 07 July 2025 20:16:51 -0400 (0:00:00.634) 0:00:18.825 *********** included: fedora.linux_system_roles.podman for managed-node2 TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3 Monday 07 July 2025 20:16:51 -0400 (0:00:00.094) 0:00:18.919 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3 Monday 07 July 2025 20:16:51 -0400 (0:00:00.052) 0:00:18.971 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if system is ostree] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11 Monday 07 July 2025 20:16:51 -0400 (0:00:00.036) 0:00:19.008 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag to 
indicate system is ostree] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16 Monday 07 July 2025 20:16:51 -0400 (0:00:00.030) 0:00:19.039 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23 Monday 07 July 2025 20:16:51 -0400 (0:00:00.031) 0:00:19.071 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28 Monday 07 July 2025 20:16:51 -0400 (0:00:00.028) 0:00:19.100 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32 Monday 07 July 2025 20:16:51 -0400 (0:00:00.031) 0:00:19.131 *********** ok: [managed-node2] => (item=RedHat.yml) => { "ansible_facts": { "__podman_packages": [ "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml" ], "ansible_loop_var": "item", "changed": false, "item": "RedHat.yml" } skipping: [managed-node2] => (item=CentOS.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Gather the package facts] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6 Monday 07 July 2025 20:16:51 -0400 (0:00:00.062) 0:00:19.194 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Enable copr if requested] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10 Monday 07 July 2025 20:16:52 -0400 (0:00:00.789) 0:00:19.983 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_use_copr | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14 Monday 07 July 2025 20:16:52 -0400 (0:00:00.032) 0:00:20.015 
*********** skipping: [managed-node2] => { "changed": false, "false_condition": "(__podman_packages | difference(ansible_facts.packages)) | list | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28 Monday 07 July 2025 20:16:52 -0400 (0:00:00.037) 0:00:20.053 *********** skipping: [managed-node2] => { "false_condition": "__podman_is_transactional | d(false)" } TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33 Monday 07 July 2025 20:16:52 -0400 (0:00:00.031) 0:00:20.084 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38 Monday 07 July 2025 20:16:52 -0400 (0:00:00.030) 0:00:20.114 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get podman version] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46 Monday 07 July 2025 20:16:52 -0400 (0:00:00.066) 0:00:20.180 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "podman", "--version" ], "delta": "0:00:00.026569", "end": "2025-07-07 20:16:52.763103", "rc": 0, "start": "2025-07-07 20:16:52.736534" } STDOUT: podman version 5.5.1 TASK [fedora.linux_system_roles.podman : Set podman version] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52 Monday 07 July 2025 20:16:52 -0400 (0:00:00.382) 0:00:20.563 *********** ok: [managed-node2] => { "ansible_facts": { "podman_version": "5.5.1" }, "changed": false } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56 Monday 07 July 2025 20:16:52 -0400 (0:00:00.034) 0:00:20.597 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.2\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63 Monday 07 July 2025 20:16:52 -0400 (0:00:00.030) 0:00:20.627 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.4\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73 Monday 07 July 2025 20:16:52 -0400 (0:00:00.035) 0:00:20.663 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { 
"skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80 Monday 07 July 2025 20:16:52 -0400 (0:00:00.037) 0:00:20.701 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"5.0\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96 Monday 07 July 2025 20:16:53 -0400 (0:00:00.049) 0:00:20.750 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109 Monday 07 July 2025 20:16:53 -0400 (0:00:00.051) 0:00:20.801 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:16:53 -0400 (0:00:00.061) 0:00:20.863 *********** ok: [managed-node2] => { "ansible_facts": { "getent_passwd": { "user_quadlet_pod": [ "x", "2223", "2223", "", "/home/user_quadlet_pod", "/bin/bash" ] } }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:16:53 -0400 (0:00:00.363) 0:00:21.226 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:16:53 -0400 (0:00:00.036) 0:00:21.263 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:16:53 -0400 (0:00:00.042) 0:00:21.306 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, 
"ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:16:53 -0400 (0:00:00.364) 0:00:21.670 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "user_quadlet_pod" ], "delta": "0:00:00.003793", "end": "2025-07-07 20:16:54.235157", "rc": 0, "start": "2025-07-07 20:16:54.231364" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:16:54 -0400 (0:00:00.365) 0:00:22.035 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "-g", "user_quadlet_pod" ], "delta": "0:00:00.005967", "end": "2025-07-07 20:16:54.602285", "rc": 0, "start": "2025-07-07 20:16:54.596318" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:16:54 -0400 (0:00:00.369) 0:00:22.405 *********** ok: [managed-node2] => { "ansible_facts": { "podman_subgid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } }, "podman_subuid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } } }, "changed": false } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:16:54 -0400 (0:00:00.053) 0:00:22.458 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:16:54 -0400 (0:00:00.032) 0:00:22.491 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:16:54 -0400 (0:00:00.037) 0:00:22.529 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 
Monday 07 July 2025 20:16:54 -0400 (0:00:00.072) 0:00:22.601 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:16:54 -0400 (0:00:00.048) 0:00:22.650 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set config file paths] **************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:115 Monday 07 July 2025 20:16:54 -0400 (0:00:00.052) 0:00:22.702 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_container_conf_file": "/root/.config/containers/containers.conf.d/50-systemroles.conf", "__podman_parent_mode": "0700", "__podman_parent_path": "/root/.config/containers", "__podman_policy_json_file": "/root/.config/containers/policy.json", "__podman_registries_conf_file": "/root/.config/containers/registries.conf.d/50-systemroles.conf", "__podman_storage_conf_file": "/root/.config/containers/storage.conf" }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle container.conf.d] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:126 Monday 07 July 2025 20:16:55 -0400 (0:00:00.047) 0:00:22.749 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure containers.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:5 Monday 07 July 2025 20:16:55 -0400 (0:00:00.076) 0:00:22.826 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update container config file] ********* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:13 Monday 07 July 2025 20:16:55 -0400 (0:00:00.037) 0:00:22.863 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle registries.conf.d] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:129 Monday 07 July 2025 20:16:55 -0400 (0:00:00.031) 0:00:22.895 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure registries.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:5 Monday 07 July 2025 20:16:55 -0400 (0:00:00.058) 0:00:22.953 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK 
[fedora.linux_system_roles.podman : Update registries config file] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:13 Monday 07 July 2025 20:16:55 -0400 (0:00:00.031) 0:00:22.985 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle storage.conf] ****************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:132 Monday 07 July 2025 20:16:55 -0400 (0:00:00.037) 0:00:23.023 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure storage.conf parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:7 Monday 07 July 2025 20:16:55 -0400 (0:00:00.074) 0:00:23.097 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update storage config file] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:15 Monday 07 July 2025 20:16:55 -0400 (0:00:00.042) 0:00:23.139 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle policy.json] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:135 Monday 07 July 2025 20:16:55 -0400 (0:00:00.035) 0:00:23.174 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure policy.json parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:8 Monday 07 July 2025 20:16:55 -0400 (0:00:00.062) 0:00:23.237 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Stat the policy.json file] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:16 Monday 07 July 2025 20:16:55 -0400 (0:00:00.032) 0:00:23.269 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get the existing policy.json] ********* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:21 Monday 07 July 2025 20:16:55 -0400 (0:00:00.030) 0:00:23.300 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Write new policy.json file] *********** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:27 Monday 07 July 2025 20:16:55 -0400 (0:00:00.032) 0:00:23.333 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage firewall for specified ports] ************************************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:141 Monday 07 July 2025 20:16:55 -0400 (0:00:00.036) 0:00:23.369 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_firewall | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage selinux for specified ports] ************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:148 Monday 07 July 2025 20:16:55 -0400 (0:00:00.107) 0:00:23.477 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_selinux_ports | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Keep track of users that need to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:155 Monday 07 July 2025 20:16:55 -0400 (0:00:00.058) 0:00:23.535 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_cancel_user_linger": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle certs.d files - present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:159 Monday 07 July 2025 20:16:55 -0400 (0:00:00.051) 0:00:23.586 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle credential files - present] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:168 Monday 07 July 2025 20:16:55 -0400 (0:00:00.033) 0:00:23.619 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle secrets] *********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:177 Monday 07 July 2025 20:16:55 -0400 (0:00:00.039) 0:00:23.659 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Kubernetes specifications] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:184 Monday 07 July 2025 20:16:55 -0400 (0:00:00.034) 0:00:23.693 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Quadlet specifications] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:191 Monday 07 July 2025 20:16:55 -0400 (0:00:00.037) 0:00:23.730 *********** included: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Monday 07 July 2025 20:16:56 -0400 (0:00:00.089) 0:00:23.820 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Pod": { "PodName": "quadlet-pod" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Monday 07 July 2025 20:16:56 -0400 (0:00:00.042) 0:00:23.863 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "created", "__podman_systemd_unit_scope": "", "__podman_user": "user_quadlet_pod" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Monday 07 July 2025 20:16:56 -0400 (0:00:00.039) 0:00:23.902 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Monday 07 July 2025 20:16:56 -0400 (0:00:00.033) 0:00:23.936 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-pod", "__podman_quadlet_type": "pod", "__podman_rootless": true }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57 Monday 07 July 2025 20:16:56 -0400 (0:00:00.047) 0:00:23.983 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:16:56 -0400 (0:00:00.060) 0:00:24.044 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:16:56 -0400 (0:00:00.036) 0:00:24.080 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": 
"Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:16:56 -0400 (0:00:00.034) 0:00:24.114 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:16:56 -0400 (0:00:00.045) 0:00:24.159 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:16:56 -0400 (0:00:00.374) 0:00:24.534 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "user_quadlet_pod" ], "delta": "0:00:00.003435", "end": "2025-07-07 20:16:57.098459", "rc": 0, "start": "2025-07-07 20:16:57.095024" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:16:57 -0400 (0:00:00.364) 0:00:24.899 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "-g", "user_quadlet_pod" ], "delta": "0:00:00.005619", "end": "2025-07-07 20:16:57.464380", "rc": 0, "start": "2025-07-07 20:16:57.458761" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:16:57 -0400 (0:00:00.365) 0:00:25.264 *********** ok: [managed-node2] => { "ansible_facts": { "podman_subgid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } }, "podman_subuid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } } }, "changed": false } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:16:57 -0400 (0:00:00.091) 0:00:25.356 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK 
[fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:16:57 -0400 (0:00:00.033) 0:00:25.390 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:16:57 -0400 (0:00:00.034) 0:00:25.424 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:16:57 -0400 (0:00:00.033) 0:00:25.458 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:16:57 -0400 (0:00:00.033) 0:00:25.492 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Monday 07 July 2025 20:16:57 -0400 (0:00:00.033) 0:00:25.525 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [], "__podman_kube_yamls_raw": "", "__podman_service_name": "quadlet-pod-pod-pod.service", "__podman_systemd_scope": "user", "__podman_user_home_dir": "/home/user_quadlet_pod", "__podman_xdg_runtime_dir": "/run/user/2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Monday 07 July 2025 20:16:57 -0400 (0:00:00.066) 0:00:25.591 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_path": "/home/user_quadlet_pod/.config/containers/systemd" }, "changed": false } TASK [fedora.linux_system_roles.podman : Get kube yaml contents] *************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77 Monday 07 July 2025 20:16:57 -0400 (0:00:00.035) 0:00:25.626 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_kube_yamls_raw | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:88 Monday 07 July 2025 20:16:57 -0400 (0:00:00.053) 0:00:25.680 *********** ok: [managed-node2] => { "ansible_facts": { 
"__podman_images": [], "__podman_quadlet_file": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod", "__podman_volumes": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:106 Monday 07 July 2025 20:16:58 -0400 (0:00:00.122) 0:00:25.803 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Cleanup quadlets] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:113 Monday 07 July 2025 20:16:58 -0400 (0:00:00.064) 0:00:25.867 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_state == \"absent\"", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create and update quadlets] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:117 Monday 07 July 2025 20:16:58 -0400 (0:00:00.049) 0:00:25.917 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Manage linger] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:2 Monday 07 July 2025 20:16:58 -0400 (0:00:00.107) 0:00:26.024 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Enable linger if needed] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:12 Monday 07 July 2025 20:16:58 -0400 (0:00:00.059) 0:00:26.083 *********** changed: [managed-node2] => { "changed": true, "cmd": [ "loginctl", "enable-linger", "user_quadlet_pod" ], "delta": "0:00:00.013263", "end": "2025-07-07 20:16:58.656663", "rc": 0, "start": "2025-07-07 20:16:58.643400" } TASK [fedora.linux_system_roles.podman : Mark user as not yet needing to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:18 Monday 07 July 2025 20:16:58 -0400 (0:00:00.395) 0:00:26.479 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_cancel_user_linger": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Mark user for possible linger cancel] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:22 Monday 07 July 2025 20:16:58 -0400 (0:00:00.043) 0:00:26.523 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_item_state | d('present') == 'absent'", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create host directories] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:7 Monday 07 July 2025 20:16:58 -0400 (0:00:00.047) 0:00:26.571 *********** skipping: [managed-node2] => { "changed": false, "skipped_reason": "No items in the list" } 
TASK [fedora.linux_system_roles.podman : Ensure container images are present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18 Monday 07 July 2025 20:16:58 -0400 (0:00:00.051) 0:00:26.623 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Ensure the quadlet directory is present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39 Monday 07 July 2025 20:16:58 -0400 (0:00:00.035) 0:00:26.658 *********** changed: [managed-node2] => { "changed": true, "gid": 2223, "group": "user_quadlet_pod", "mode": "0755", "owner": "user_quadlet_pod", "path": "/home/user_quadlet_pod/.config/containers/systemd", "secontext": "unconfined_u:object_r:config_home_t:s0", "size": 6, "state": "directory", "uid": 2223 } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is copied] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:50 Monday 07 July 2025 20:16:59 -0400 (0:00:00.369) 0:00:27.027 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_file_src | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file content is present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:62 Monday 07 July 2025 20:16:59 -0400 (0:00:00.072) 0:00:27.100 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_str | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75 Monday 07 July 2025 20:16:59 -0400 (0:00:00.035) 0:00:27.135 *********** changed: [managed-node2] => { "changed": true, "checksum": "1884c880482430d8bf2e944b003734fb8b7a462d", "dest": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod", "gid": 2223, "group": "user_quadlet_pod", "md5sum": "43c9e9c2ff3ad9cd27c1f2d12f03aee0", "mode": "0644", "owner": "user_quadlet_pod", "secontext": "unconfined_u:object_r:config_home_t:s0", "size": 70, "src": "/root/.ansible/tmp/ansible-tmp-1751933819.4453251-20404-152886448732131/.source.pod", "state": "file", "uid": 2223 } TASK [fedora.linux_system_roles.podman : Reload systemctl] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:87 Monday 07 July 2025 20:17:00 -0400 (0:00:00.687) 0:00:27.823 *********** [WARNING]: Module remote_tmp /home/user_quadlet_pod/.ansible/tmp did not exist and was created with a mode of 0700, this may cause issues when running as another user. 
To avoid this, create the remote_tmp dir with the correct permissions manually ok: [managed-node2] => { "changed": false, "name": null, "status": {} } TASK [fedora.linux_system_roles.podman : Start service] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:115 Monday 07 July 2025 20:17:00 -0400 (0:00:00.560) 0:00:28.383 *********** changed: [managed-node2] => { "changed": true, "name": "quadlet-pod-pod-pod.service", "state": "started", "status": { "AccessSELinuxContext": "unconfined_u:object_r:user_tmp_t:s0", "ActiveEnterTimestampMonotonic": "0", "ActiveExitTimestampMonotonic": "0", "ActiveState": "inactive", "After": "podman-user-wait-network-online.service app.slice basic.target run-user-2223.mount -.mount", "AllowIsolate": "no", "AssertResult": "no", "AssertTimestampMonotonic": "0", "Before": "shutdown.target", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "yes", "CPUAffinityFromNUMA": "no", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "[not set]", "CPUUsageNSec": "[not set]", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanFreeze": "yes", "CanIsolate": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read cap_perfmon cap_bpf cap_checkpoint_restore", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "no", "ConditionTimestampMonotonic": "0", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroupId": "0", "ControlPID": "0", "CoredumpFilter": "0x33", "DefaultDependencies": "yes", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "Delegate": "no", "Description": "quadlet-pod-pod-pod.service", "DevicePolicy": "auto", "DynamicUser": "no", "Environment": "PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainPID": "0", "ExecMainStartTimestampMonotonic": "0", "ExecMainStatus": "0", "ExecStart": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod start --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod start --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartPre": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod create --infra-conmon-pidfile=/run/user/2223/quadlet-pod-pod-pod.pid --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartPreEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod create 
--infra-conmon-pidfile=/run/user/2223/quadlet-pod-pod-pod.pid --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStop": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod stop --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id --ignore --time=10 ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod stop --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id --ignore --time=10 ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPost": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod rm --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id --ignore --force ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPostEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman pod rm --pod-id-file=/run/user/2223/quadlet-pod-pod-pod.pod-id --ignore --force ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExitType": "main", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FinalKillSignal": "9", "FragmentPath": "/run/user/2223/systemd/generator/quadlet-pod-pod-pod.service", "FreezerState": "running", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": "18446744073709551615", "IOReadOperations": "18446744073709551615", "IOSchedulingClass": "2", "IOSchedulingPriority": "4", "IOWeight": "[not set]", "IOWriteBytes": "18446744073709551615", "IOWriteOperations": "18446744073709551615", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "quadlet-pod-pod-pod.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestampMonotonic": "0", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "inherit", "KillMode": "control-group", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": "infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "8388608", "LimitMEMLOCKSoft": "8388608", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "524288", "LimitNOFILESoft": "1024", "LimitNPROC": "13688", "LimitNPROCSoft": "13688", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "13688", "LimitSIGPENDINGSoft": "13688", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "0", "ManagedOOMMemoryPressure": "auto", "ManagedOOMMemoryPressureLimit": "0", "ManagedOOMPreference": "none", "ManagedOOMSwap": "auto", "MemoryAccounting": "yes", "MemoryAvailable": "infinity", "MemoryCurrent": "[not set]", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryLimit": 
"infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemorySwapMax": "infinity", "MountAPIVFS": "no", "NFileDescriptorStore": "0", "NRestarts": "0", "NUMAPolicy": "n/a", "Names": "quadlet-pod-pod-pod.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "none", "OOMPolicy": "stop", "OOMScoreAdjust": "200", "OnFailureJobMode": "replace", "OnSuccessJobMode": "fail", "PIDFile": "/run/user/2223/quadlet-pod-pod-pod.pid", "Perpetual": "no", "PrivateDevices": "no", "PrivateIPC": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "PrivateUsers": "no", "ProcSubset": "all", "ProtectClock": "no", "ProtectControlGroups": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelLogs": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectProc": "default", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "ReloadSignal": "1", "RemainAfterExit": "no", "RemoveIPC": "no", "Requires": "basic.target app.slice", "RequiresMountsFor": "/run/user/2223/containers", "Restart": "on-failure", "RestartKillSignal": "15", "RestartUSec": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "success", "RootDirectoryStartOnly": "no", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "RuntimeRandomizedExtraUSec": "0", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "app.slice", "SourcePath": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": "[not set]", "StartupIOWeight": "[not set]", "StateChangeTimestampMonotonic": "0", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "dead", "SuccessAction": "none", "SyslogFacility": "3", "SyslogIdentifier": "quadlet-pod-pod-pod", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "SystemCallErrorNumber": "2147483646", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "[not set]", "TasksMax": "21900", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartFailureMode": "terminate", "TimeoutStartUSec": "1min 30s", "TimeoutStopFailureMode": "terminate", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "forking", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "generated", "UtmpMode": "init", "Wants": "podman-user-wait-network-online.service", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "infinity", "WorkingDirectory": "!/home/user_quadlet_pod" } } TASK [fedora.linux_system_roles.podman : Restart service] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:131 Monday 07 July 2025 20:17:01 -0400 (0:00:00.866) 0:00:29.250 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_service_started is changed", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** 
task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Monday 07 July 2025 20:17:01 -0400 (0:00:00.035) 0:00:29.286 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Container": { "ContainerName": "quadlet-pod-container", "Exec": "/bin/busybox-extras httpd -f -p 80", "Image": "quay.io/libpod/testimage:20210610", "Pod": "quadlet-pod-pod.pod" }, "Install": { "WantedBy": "default.target" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Monday 07 July 2025 20:17:01 -0400 (0:00:00.043) 0:00:29.329 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "created", "__podman_systemd_unit_scope": "", "__podman_user": "user_quadlet_pod" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Monday 07 July 2025 20:17:01 -0400 (0:00:00.043) 0:00:29.373 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Monday 07 July 2025 20:17:01 -0400 (0:00:00.032) 0:00:29.406 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-container", "__podman_quadlet_type": "container", "__podman_rootless": true }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57 Monday 07 July 2025 20:17:01 -0400 (0:00:00.051) 0:00:29.457 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:17:01 -0400 (0:00:00.061) 0:00:29.518 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:17:01 -0400 (0:00:00.036) 0:00:29.555 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:17:01 -0400 (0:00:00.037) 0:00:29.593 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:17:01 -0400 (0:00:00.049) 0:00:29.643 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:17:02 -0400 (0:00:00.371) 0:00:30.014 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "user_quadlet_pod" ], "delta": "0:00:00.004112", "end": "2025-07-07 20:17:02.579450", "rc": 0, "start": "2025-07-07 20:17:02.575338" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:17:02 -0400 (0:00:00.368) 0:00:30.383 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "-g", "user_quadlet_pod" ], "delta": "0:00:00.006351", "end": "2025-07-07 20:17:02.954224", "rc": 0, "start": "2025-07-07 20:17:02.947873" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:17:03 -0400 (0:00:00.371) 0:00:30.755 *********** ok: [managed-node2] => { "ansible_facts": { "podman_subgid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } }, "podman_subuid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } } }, "changed": false } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:17:03 -0400 (0:00:00.047) 0:00:30.802 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:17:03 -0400 (0:00:00.034) 0:00:30.836 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:17:03 -0400 (0:00:00.032) 0:00:30.868 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:17:03 -0400 (0:00:00.077) 0:00:30.946 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:17:03 -0400 (0:00:00.037) 0:00:30.983 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Monday 07 July 2025 20:17:03 -0400 (0:00:00.057) 0:00:31.041 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [ "quay.io/libpod/testimage:20210610" ], "__podman_kube_yamls_raw": "", "__podman_service_name": "quadlet-pod-container.service", "__podman_systemd_scope": "user", "__podman_user_home_dir": "/home/user_quadlet_pod", "__podman_xdg_runtime_dir": "/run/user/2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Monday 07 July 2025 20:17:03 -0400 (0:00:00.075) 0:00:31.116 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_path": "/home/user_quadlet_pod/.config/containers/systemd" }, "changed": false } TASK [fedora.linux_system_roles.podman : Get kube yaml contents] *************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77 Monday 07 July 2025 20:17:03 -0400 (0:00:00.041) 0:00:31.157 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_kube_yamls_raw | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:88 Monday 07 July 2025 20:17:03 -0400 (0:00:00.042) 0:00:31.199 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_images": [ "quay.io/libpod/testimage:20210610" ], 
"__podman_quadlet_file": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "__podman_volumes": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:106 Monday 07 July 2025 20:17:03 -0400 (0:00:00.090) 0:00:31.290 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Cleanup quadlets] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:113 Monday 07 July 2025 20:17:03 -0400 (0:00:00.039) 0:00:31.329 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_state == \"absent\"", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create and update quadlets] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:117 Monday 07 July 2025 20:17:03 -0400 (0:00:00.031) 0:00:31.361 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Manage linger] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:2 Monday 07 July 2025 20:17:03 -0400 (0:00:00.067) 0:00:31.429 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Enable linger if needed] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:12 Monday 07 July 2025 20:17:03 -0400 (0:00:00.053) 0:00:31.483 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "loginctl", "enable-linger", "user_quadlet_pod" ], "delta": null, "end": null, "rc": 0, "start": null } STDOUT: skipped, since /var/lib/systemd/linger/user_quadlet_pod exists MSG: Did not run command since '/var/lib/systemd/linger/user_quadlet_pod' exists TASK [fedora.linux_system_roles.podman : Mark user as not yet needing to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:18 Monday 07 July 2025 20:17:04 -0400 (0:00:00.369) 0:00:31.852 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_cancel_user_linger": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Mark user for possible linger cancel] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:22 Monday 07 July 2025 20:17:04 -0400 (0:00:00.036) 0:00:31.888 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_item_state | d('present') == 'absent'", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Create host directories] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:7 Monday 07 July 2025 20:17:04 -0400 (0:00:00.034) 0:00:31.922 *********** skipping: 
[managed-node2] => { "changed": false, "skipped_reason": "No items in the list" } TASK [fedora.linux_system_roles.podman : Ensure container images are present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18 Monday 07 July 2025 20:17:04 -0400 (0:00:00.027) 0:00:31.950 *********** changed: [managed-node2] => (item=None) => { "attempts": 1, "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": true } changed: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": true } TASK [fedora.linux_system_roles.podman : Ensure the quadlet directory is present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39 Monday 07 July 2025 20:17:05 -0400 (0:00:01.193) 0:00:33.143 *********** ok: [managed-node2] => { "changed": false, "gid": 2223, "group": "user_quadlet_pod", "mode": "0755", "owner": "user_quadlet_pod", "path": "/home/user_quadlet_pod/.config/containers/systemd", "secontext": "unconfined_u:object_r:config_home_t:s0", "size": 33, "state": "directory", "uid": 2223 } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is copied] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:50 Monday 07 July 2025 20:17:05 -0400 (0:00:00.422) 0:00:33.565 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_file_src | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file content is present] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:62 Monday 07 July 2025 20:17:05 -0400 (0:00:00.055) 0:00:33.621 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_str | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure quadlet file is present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75 Monday 07 July 2025 20:17:05 -0400 (0:00:00.062) 0:00:33.684 *********** changed: [managed-node2] => { "changed": true, "checksum": "f0b5c8159fc3c65bf9310a371751609e4c1ba4c3", "dest": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "gid": 2223, "group": "user_quadlet_pod", "md5sum": "daaf6e904ff3c17edeb801084cfe256f", "mode": "0644", "owner": "user_quadlet_pod", "secontext": "unconfined_u:object_r:config_home_t:s0", "size": 230, "src": "/root/.ansible/tmp/ansible-tmp-1751933826.0010831-20615-171147992892008/.source.container", "state": "file", "uid": 2223 } TASK [fedora.linux_system_roles.podman : Reload systemctl] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:87 Monday 07 July 2025 20:17:06 -0400 (0:00:00.737) 0:00:34.421 *********** ok: [managed-node2] => { "changed": false, "name": null, "status": {} } TASK [fedora.linux_system_roles.podman : Start service] ************************ task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:115 Monday 07 July 2025 20:17:07 -0400 (0:00:00.573) 0:00:34.995 *********** changed: [managed-node2] => { "changed": true, "name": "quadlet-pod-container.service", "state": "started", "status": { "AccessSELinuxContext": "unconfined_u:object_r:user_tmp_t:s0", "ActiveEnterTimestampMonotonic": "0", "ActiveExitTimestampMonotonic": "0", "ActiveState": "inactive", "After": "run-user-2223.mount app.slice podman-user-wait-network-online.service basic.target quadlet-pod-pod-pod.service -.mount", "AllowIsolate": "no", "AssertResult": "no", "AssertTimestampMonotonic": "0", "Before": "shutdown.target default.target", "BindsTo": "quadlet-pod-pod-pod.service", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "yes", "CPUAffinityFromNUMA": "no", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "[not set]", "CPUUsageNSec": "[not set]", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanFreeze": "yes", "CanIsolate": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read cap_perfmon cap_bpf cap_checkpoint_restore", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "no", "ConditionTimestampMonotonic": "0", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroupId": "0", "ControlPID": "0", "CoredumpFilter": "0x33", "DefaultDependencies": "yes", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "Delegate": "yes", "DelegateControllers": "cpu cpuacct cpuset io blkio memory devices pids bpf-firewall bpf-devices bpf-foreign bpf-socket-bind bpf-restrict-network-interfaces", "Description": "quadlet-pod-container.service", "DevicePolicy": "auto", "DynamicUser": "no", "Environment": "PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainPID": "0", "ExecMainStartTimestampMonotonic": "0", "ExecMainStatus": "0", "ExecStart": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/user/2223/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/user/2223/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/user/2223/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/user/2223/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStop": "{ path=/usr/bin/podman ; 
argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPost": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; ignore_errors=yes ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPostEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; flags=ignore-failure ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExitType": "main", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FinalKillSignal": "9", "FragmentPath": "/run/user/2223/systemd/generator/quadlet-pod-container.service", "FreezerState": "running", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": "18446744073709551615", "IOReadOperations": "18446744073709551615", "IOSchedulingClass": "2", "IOSchedulingPriority": "4", "IOWeight": "[not set]", "IOWriteBytes": "18446744073709551615", "IOWriteOperations": "18446744073709551615", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "quadlet-pod-container.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestampMonotonic": "0", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "inherit", "KillMode": "mixed", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": "infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "8388608", "LimitMEMLOCKSoft": "8388608", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "524288", "LimitNOFILESoft": "1024", "LimitNPROC": "13688", "LimitNPROCSoft": "13688", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "13688", "LimitSIGPENDINGSoft": "13688", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "0", "ManagedOOMMemoryPressure": "auto", "ManagedOOMMemoryPressureLimit": "0", "ManagedOOMPreference": "none", "ManagedOOMSwap": "auto", "MemoryAccounting": "yes", "MemoryAvailable": "infinity", "MemoryCurrent": "[not set]", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryLimit": "infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemorySwapMax": "infinity", "MountAPIVFS": "no", "NFileDescriptorStore": "0", "NRestarts": "0", "NUMAPolicy": "n/a", "Names": "quadlet-pod-container.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "all", "OOMPolicy": "continue", "OOMScoreAdjust": "200", 
"OnFailureJobMode": "replace", "OnSuccessJobMode": "fail", "Perpetual": "no", "PrivateDevices": "no", "PrivateIPC": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "PrivateUsers": "no", "ProcSubset": "all", "ProtectClock": "no", "ProtectControlGroups": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelLogs": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectProc": "default", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "ReloadSignal": "1", "RemainAfterExit": "no", "RemoveIPC": "no", "Requires": "app.slice basic.target", "RequiresMountsFor": "/run/user/2223/containers", "Restart": "no", "RestartKillSignal": "15", "RestartUSec": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "success", "RootDirectoryStartOnly": "no", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "RuntimeRandomizedExtraUSec": "0", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "app.slice", "SourcePath": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": "[not set]", "StartupIOWeight": "[not set]", "StateChangeTimestampMonotonic": "0", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "dead", "SuccessAction": "none", "SyslogFacility": "3", "SyslogIdentifier": "quadlet-pod-container", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "SystemCallErrorNumber": "2147483646", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "[not set]", "TasksMax": "21900", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartFailureMode": "terminate", "TimeoutStartUSec": "1min 30s", "TimeoutStopFailureMode": "terminate", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "notify", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "generated", "UtmpMode": "init", "WantedBy": "default.target quadlet-pod-pod-pod.service", "Wants": "podman-user-wait-network-online.service", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "infinity", "WorkingDirectory": "!/home/user_quadlet_pod" } } TASK [fedora.linux_system_roles.podman : Restart service] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:131 Monday 07 July 2025 20:17:07 -0400 (0:00:00.698) 0:00:35.693 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_service_started is changed", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Cancel linger] ************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:198 Monday 07 July 2025 20:17:08 -0400 (0:00:00.065) 0:00:35.758 *********** skipping: [managed-node2] => { "changed": false, "skipped_reason": "No items in the list" } TASK [fedora.linux_system_roles.podman : Handle credential files - absent] ***** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:204 Monday 07 July 2025 20:17:08 -0400 (0:00:00.048) 0:00:35.807 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle certs.d files - absent] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:213 Monday 07 July 2025 20:17:08 -0400 (0:00:00.047) 0:00:35.855 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [Check files] ************************************************************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:70 Monday 07 July 2025 20:17:08 -0400 (0:00:00.077) 0:00:35.932 *********** ok: [managed-node2] => (item=quadlet-pod-container.container) => { "ansible_loop_var": "item", "changed": false, "cmd": [ "cat", "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container" ], "delta": "0:00:00.002957", "end": "2025-07-07 20:17:08.525259", "item": "quadlet-pod-container.container", "rc": 0, "start": "2025-07-07 20:17:08.522302" } STDOUT: # # Ansible managed # # system_role:podman [Install] WantedBy=default.target [Container] Image=quay.io/libpod/testimage:20210610 ContainerName=quadlet-pod-container Pod=quadlet-pod-pod.pod Exec=/bin/busybox-extras httpd -f -p 80 ok: [managed-node2] => (item=quadlet-pod-pod.pod) => { "ansible_loop_var": "item", "changed": false, "cmd": [ "cat", "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod" ], "delta": "0:00:00.003170", "end": "2025-07-07 20:17:08.853728", "item": "quadlet-pod-pod.pod", "rc": 0, "start": "2025-07-07 20:17:08.850558" } STDOUT: # # Ansible managed # # system_role:podman [Pod] PodName=quadlet-pod TASK [Check pod] *************************************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:79 Monday 07 July 2025 20:17:08 -0400 (0:00:00.740) 0:00:36.673 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "podman", "pod", "inspect", "quadlet-pod", "--format", "{{range .Containers}}{{.Name}}\n{{end}}" ], "delta": "0:00:00.055653", "end": "2025-07-07 20:17:09.326246", "failed_when_result": false, "rc": 0, "start": "2025-07-07 20:17:09.270593" } STDOUT: quadlet-pod-infra quadlet-pod-container TASK [Ensure linger] *********************************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:91 Monday 07 July 2025 20:17:09 -0400 (0:00:00.477) 0:00:37.150 *********** ok: [managed-node2] => { "changed": false, "failed_when_result": false, "stat": { "atime": 1751933818.6499095, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 0, "charset": "binary", "checksum": "da39a3ee5e6b4b0d3255bfef95601890afd80709", "ctime": 1751933818.6499095, "dev": 51713, "device_type": 0, "executable": false, "exists": true, "gid": 0, "gr_name": "root", "inode": 12786454, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "inode/x-empty", "mode": "0644", "mtime": 1751933818.6499095, 
"nlink": 1, "path": "/var/lib/systemd/linger/user_quadlet_pod", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 0, "uid": 0, "version": "2833537922", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": false, "xoth": false, "xusr": false } } TASK [Cleanup user] ************************************************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:99 Monday 07 July 2025 20:17:09 -0400 (0:00:00.423) 0:00:37.574 *********** included: fedora.linux_system_roles.podman for managed-node2 TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3 Monday 07 July 2025 20:17:10 -0400 (0:00:00.196) 0:00:37.771 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3 Monday 07 July 2025 20:17:10 -0400 (0:00:00.092) 0:00:37.863 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if system is ostree] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11 Monday 07 July 2025 20:17:10 -0400 (0:00:00.063) 0:00:37.927 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag to indicate system is ostree] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16 Monday 07 July 2025 20:17:10 -0400 (0:00:00.054) 0:00:37.982 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23 Monday 07 July 2025 20:17:10 -0400 (0:00:00.053) 0:00:38.035 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28 Monday 07 July 2025 20:17:10 -0400 (0:00:00.054) 0:00:38.090 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32 Monday 07 July 2025 20:17:10 -0400 (0:00:00.052) 0:00:38.143 *********** ok: [managed-node2] => (item=RedHat.yml) => { "ansible_facts": { 
"__podman_packages": [ "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml" ], "ansible_loop_var": "item", "changed": false, "item": "RedHat.yml" } skipping: [managed-node2] => (item=CentOS.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Gather the package facts] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6 Monday 07 July 2025 20:17:10 -0400 (0:00:00.160) 0:00:38.304 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Enable copr if requested] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10 Monday 07 July 2025 20:17:11 -0400 (0:00:00.810) 0:00:39.114 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_use_copr | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14 Monday 07 July 2025 20:17:11 -0400 (0:00:00.032) 0:00:39.147 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "(__podman_packages | difference(ansible_facts.packages)) | list | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28 Monday 07 July 2025 20:17:11 -0400 (0:00:00.039) 0:00:39.187 *********** skipping: [managed-node2] => { "false_condition": "__podman_is_transactional | d(false)" } TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33 Monday 07 July 2025 20:17:11 -0400 (0:00:00.031) 0:00:39.218 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38 Monday 07 July 2025 20:17:11 -0400 (0:00:00.034) 0:00:39.253 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get podman version] ******************* task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46 Monday 07 July 2025 20:17:11 -0400 (0:00:00.031) 0:00:39.285 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "podman", "--version" ], "delta": "0:00:00.025942", "end": "2025-07-07 20:17:11.862665", "rc": 0, "start": "2025-07-07 20:17:11.836723" } STDOUT: podman version 5.5.1 TASK [fedora.linux_system_roles.podman : Set podman version] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52 Monday 07 July 2025 20:17:11 -0400 (0:00:00.379) 0:00:39.664 *********** ok: [managed-node2] => { "ansible_facts": { "podman_version": "5.5.1" }, "changed": false } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56 Monday 07 July 2025 20:17:11 -0400 (0:00:00.033) 0:00:39.698 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.2\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63 Monday 07 July 2025 20:17:11 -0400 (0:00:00.032) 0:00:39.731 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.4\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73 Monday 07 July 2025 20:17:12 -0400 (0:00:00.042) 0:00:39.774 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80 Monday 07 July 2025 20:17:12 -0400 (0:00:00.040) 0:00:39.815 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"5.0\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96 Monday 07 July 2025 20:17:12 -0400 (0:00:00.056) 0:00:39.871 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109 Monday 07 July 2025 20:17:12 -0400 (0:00:00.056) 0:00:39.927 *********** included: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:17:12 -0400 (0:00:00.061) 0:00:39.989 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:17:12 -0400 (0:00:00.036) 0:00:40.026 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:17:12 -0400 (0:00:00.136) 0:00:40.163 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:17:12 -0400 (0:00:00.045) 0:00:40.208 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:17:12 -0400 (0:00:00.364) 0:00:40.572 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "user_quadlet_pod" ], "delta": "0:00:00.003547", "end": "2025-07-07 20:17:13.134234", "rc": 0, "start": "2025-07-07 20:17:13.130687" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:17:13 -0400 (0:00:00.362) 0:00:40.935 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "-g", "user_quadlet_pod" ], "delta": "0:00:00.006163", "end": "2025-07-07 20:17:13.497752", "rc": 0, "start": 
"2025-07-07 20:17:13.491589" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:17:13 -0400 (0:00:00.360) 0:00:41.295 *********** ok: [managed-node2] => { "ansible_facts": { "podman_subgid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } }, "podman_subuid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } } }, "changed": false } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:17:13 -0400 (0:00:00.051) 0:00:41.346 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:17:13 -0400 (0:00:00.033) 0:00:41.379 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:17:13 -0400 (0:00:00.032) 0:00:41.412 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:17:13 -0400 (0:00:00.032) 0:00:41.445 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:17:13 -0400 (0:00:00.033) 0:00:41.479 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set config file paths] **************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:115 Monday 07 July 2025 20:17:13 -0400 (0:00:00.036) 0:00:41.515 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_container_conf_file": "/home/user_quadlet_pod/.config/containers/containers.conf.d/50-systemroles.conf", "__podman_parent_mode": "0700", "__podman_parent_path": "/home/user_quadlet_pod/.config/containers", "__podman_policy_json_file": "/home/user_quadlet_pod/.config/containers/policy.json", "__podman_registries_conf_file": "/home/user_quadlet_pod/.config/containers/registries.conf.d/50-systemroles.conf", "__podman_storage_conf_file": 
"/home/user_quadlet_pod/.config/containers/storage.conf" }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle container.conf.d] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:126 Monday 07 July 2025 20:17:13 -0400 (0:00:00.068) 0:00:41.583 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure containers.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:5 Monday 07 July 2025 20:17:13 -0400 (0:00:00.101) 0:00:41.685 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update container config file] ********* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:13 Monday 07 July 2025 20:17:14 -0400 (0:00:00.053) 0:00:41.739 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle registries.conf.d] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:129 Monday 07 July 2025 20:17:14 -0400 (0:00:00.057) 0:00:41.796 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure registries.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:5 Monday 07 July 2025 20:17:14 -0400 (0:00:00.102) 0:00:41.899 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update registries config file] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:13 Monday 07 July 2025 20:17:14 -0400 (0:00:00.038) 0:00:41.937 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle storage.conf] ****************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:132 Monday 07 July 2025 20:17:14 -0400 (0:00:00.036) 0:00:41.974 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure storage.conf parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:7 Monday 07 July 2025 20:17:14 -0400 (0:00:00.124) 0:00:42.098 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update storage config file] *********** 
task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:15 Monday 07 July 2025 20:17:14 -0400 (0:00:00.033) 0:00:42.132 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle policy.json] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:135 Monday 07 July 2025 20:17:14 -0400 (0:00:00.034) 0:00:42.166 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure policy.json parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:8 Monday 07 July 2025 20:17:14 -0400 (0:00:00.062) 0:00:42.228 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Stat the policy.json file] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:16 Monday 07 July 2025 20:17:14 -0400 (0:00:00.031) 0:00:42.260 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get the existing policy.json] ********* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:21 Monday 07 July 2025 20:17:14 -0400 (0:00:00.031) 0:00:42.292 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Write new policy.json file] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:27 Monday 07 July 2025 20:17:14 -0400 (0:00:00.031) 0:00:42.323 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage firewall for specified ports] ************************************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:141 Monday 07 July 2025 20:17:14 -0400 (0:00:00.032) 0:00:42.356 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_firewall | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage selinux for specified ports] ************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:148 Monday 07 July 2025 20:17:14 -0400 (0:00:00.037) 0:00:42.393 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_selinux_ports | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Keep track of users that need to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:155 Monday 07 July 2025 20:17:14 -0400 (0:00:00.046) 
0:00:42.440 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_cancel_user_linger": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle certs.d files - present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:159 Monday 07 July 2025 20:17:14 -0400 (0:00:00.052) 0:00:42.493 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle credential files - present] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:168 Monday 07 July 2025 20:17:14 -0400 (0:00:00.035) 0:00:42.528 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle secrets] *********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:177 Monday 07 July 2025 20:17:14 -0400 (0:00:00.034) 0:00:42.563 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Kubernetes specifications] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:184 Monday 07 July 2025 20:17:14 -0400 (0:00:00.033) 0:00:42.596 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Quadlet specifications] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:191 Monday 07 July 2025 20:17:14 -0400 (0:00:00.036) 0:00:42.633 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Monday 07 July 2025 20:17:14 -0400 (0:00:00.095) 0:00:42.728 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Container": { "ContainerName": "quadlet-pod-container", "Exec": "/bin/busybox-extras httpd -f -p 80", "Image": "quay.io/libpod/testimage:20210610", "Pod": "quadlet-pod-pod.pod" }, "Install": { "WantedBy": "default.target" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Monday 07 July 2025 20:17:15 -0400 (0:00:00.041) 0:00:42.770 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "absent", 
"__podman_systemd_unit_scope": "", "__podman_user": "user_quadlet_pod" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Monday 07 July 2025 20:17:15 -0400 (0:00:00.045) 0:00:42.815 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Monday 07 July 2025 20:17:15 -0400 (0:00:00.032) 0:00:42.848 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-container", "__podman_quadlet_type": "container", "__podman_rootless": true }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57 Monday 07 July 2025 20:17:15 -0400 (0:00:00.097) 0:00:42.945 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:17:15 -0400 (0:00:00.084) 0:00:43.030 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:17:15 -0400 (0:00:00.060) 0:00:43.091 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:17:15 -0400 (0:00:00.062) 0:00:43.154 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:17:15 -0400 (0:00:00.074) 0:00:43.228 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", 
"mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:17:15 -0400 (0:00:00.380) 0:00:43.609 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "user_quadlet_pod" ], "delta": "0:00:00.003576", "end": "2025-07-07 20:17:16.168188", "rc": 0, "start": "2025-07-07 20:17:16.164612" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:17:16 -0400 (0:00:00.357) 0:00:43.966 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "-g", "user_quadlet_pod" ], "delta": "0:00:00.005784", "end": "2025-07-07 20:17:16.525020", "rc": 0, "start": "2025-07-07 20:17:16.519236" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:17:16 -0400 (0:00:00.387) 0:00:44.354 *********** ok: [managed-node2] => { "ansible_facts": { "podman_subgid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } }, "podman_subuid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } } }, "changed": false } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:17:16 -0400 (0:00:00.079) 0:00:44.433 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:17:16 -0400 (0:00:00.056) 0:00:44.490 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:17:16 -0400 (0:00:00.054) 0:00:44.544 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:17:16 -0400 (0:00:00.055) 0:00:44.600 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", 
"skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:17:16 -0400 (0:00:00.055) 0:00:44.655 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Monday 07 July 2025 20:17:16 -0400 (0:00:00.055) 0:00:44.711 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [ "quay.io/libpod/testimage:20210610" ], "__podman_kube_yamls_raw": "", "__podman_service_name": "quadlet-pod-container.service", "__podman_systemd_scope": "user", "__podman_user_home_dir": "/home/user_quadlet_pod", "__podman_xdg_runtime_dir": "/run/user/2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Monday 07 July 2025 20:17:17 -0400 (0:00:00.088) 0:00:44.800 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_path": "/home/user_quadlet_pod/.config/containers/systemd" }, "changed": false } TASK [fedora.linux_system_roles.podman : Get kube yaml contents] *************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77 Monday 07 July 2025 20:17:17 -0400 (0:00:00.067) 0:00:44.868 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_state != \"absent\"", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:88 Monday 07 July 2025 20:17:17 -0400 (0:00:00.050) 0:00:44.919 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_images": [ "quay.io/libpod/testimage:20210610" ], "__podman_quadlet_file": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "__podman_volumes": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:106 Monday 07 July 2025 20:17:17 -0400 (0:00:00.128) 0:00:45.047 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Cleanup quadlets] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:113 Monday 07 July 2025 20:17:17 -0400 (0:00:00.062) 0:00:45.110 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Stat XDG_RUNTIME_DIR] ***************** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:4 Monday 07 July 2025 20:17:17 -0400 (0:00:00.178) 0:00:45.289 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933818.6829095, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 0, "charset": "binary", "ctime": 1751933827.7909672, "dev": 65, "device_type": 0, "executable": true, "exists": true, "gid": 2223, "gr_name": "user_quadlet_pod", "inode": 1, "isblk": false, "ischr": false, "isdir": true, "isfifo": false, "isgid": false, "islnk": false, "isreg": false, "issock": false, "isuid": false, "mimetype": "inode/directory", "mode": "0700", "mtime": 1751933827.7909672, "nlink": 7, "path": "/run/user/2223", "pw_name": "user_quadlet_pod", "readable": true, "rgrp": false, "roth": false, "rusr": true, "size": 220, "uid": 2223, "version": null, "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": false, "xoth": false, "xusr": true } } TASK [fedora.linux_system_roles.podman : Stop and disable service] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:12 Monday 07 July 2025 20:17:17 -0400 (0:00:00.417) 0:00:45.706 *********** changed: [managed-node2] => { "changed": true, "enabled": false, "failed_when_result": false, "name": "quadlet-pod-container.service", "state": "stopped", "status": { "AccessSELinuxContext": "unconfined_u:object_r:user_tmp_t:s0", "ActiveEnterTimestamp": "Mon 2025-07-07 20:17:07 EDT", "ActiveEnterTimestampMonotonic": "853213205", "ActiveExitTimestampMonotonic": "0", "ActiveState": "active", "After": "run-user-2223.mount app.slice podman-user-wait-network-online.service basic.target quadlet-pod-pod-pod.service -.mount", "AllowIsolate": "no", "AssertResult": "yes", "AssertTimestamp": "Mon 2025-07-07 20:17:07 EDT", "AssertTimestampMonotonic": "853089335", "Before": "shutdown.target default.target", "BindsTo": "quadlet-pod-pod-pod.service", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "yes", "CPUAffinityFromNUMA": "no", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "[not set]", "CPUUsageNSec": "85290000", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanFreeze": "yes", "CanIsolate": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read cap_perfmon cap_bpf cap_checkpoint_restore", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "yes", "ConditionTimestamp": "Mon 2025-07-07 20:17:07 EDT", "ConditionTimestampMonotonic": "853089330", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroup": "/user.slice/user-2223.slice/user@2223.service/app.slice/quadlet-pod-container.service", "ControlGroupId": "11933", "ControlPID": "0", "CoredumpFilter": "0x33", 
"DefaultDependencies": "yes", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "Delegate": "yes", "DelegateControllers": "cpu cpuacct cpuset io blkio memory devices pids bpf-firewall bpf-devices bpf-foreign bpf-socket-bind bpf-restrict-network-interfaces", "Description": "quadlet-pod-container.service", "DevicePolicy": "auto", "DynamicUser": "no", "Environment": "PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service", "ExecMainCode": "0", "ExecMainExitTimestampMonotonic": "0", "ExecMainPID": "62993", "ExecMainStartTimestamp": "Mon 2025-07-07 20:17:07 EDT", "ExecMainStartTimestampMonotonic": "853213135", "ExecMainStatus": "0", "ExecStart": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/user/2223/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/user/2223/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; ignore_errors=no ; start_time=[Mon 2025-07-07 20:17:07 EDT] ; stop_time=[n/a] ; pid=62982 ; code=(null) ; status=0/0 }", "ExecStartEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/user/2223/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/user/2223/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; flags= ; start_time=[Mon 2025-07-07 20:17:07 EDT] ; stop_time=[n/a] ; pid=62982 ; code=(null) ; status=0/0 }", "ExecStop": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPost": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; ignore_errors=yes ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStopPostEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; flags=ignore-failure ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExitType": "main", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FinalKillSignal": "9", "FragmentPath": "/run/user/2223/systemd/generator/quadlet-pod-container.service", "FreezerState": "running", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": "18446744073709551615", "IOReadOperations": "18446744073709551615", "IOSchedulingClass": "2", "IOSchedulingPriority": "4", "IOWeight": "[not set]", "IOWriteBytes": "18446744073709551615", "IOWriteOperations": "18446744073709551615", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "quadlet-pod-container.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestampMonotonic": "0", "InactiveExitTimestamp": "Mon 2025-07-07 20:17:07 EDT", "InactiveExitTimestampMonotonic": "853099444", "InvocationID": "c1dce7ce163540689b9f034da0546997", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "inherit", "KillMode": "mixed", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": 
"infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "8388608", "LimitMEMLOCKSoft": "8388608", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "524288", "LimitNOFILESoft": "1024", "LimitNPROC": "13688", "LimitNPROCSoft": "13688", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "13688", "LimitSIGPENDINGSoft": "13688", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "62993", "ManagedOOMMemoryPressure": "auto", "ManagedOOMMemoryPressureLimit": "0", "ManagedOOMPreference": "none", "ManagedOOMSwap": "auto", "MemoryAccounting": "yes", "MemoryAvailable": "infinity", "MemoryCurrent": "978944", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryLimit": "infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemorySwapMax": "infinity", "MountAPIVFS": "no", "NFileDescriptorStore": "0", "NRestarts": "0", "NUMAPolicy": "n/a", "Names": "quadlet-pod-container.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "all", "OOMPolicy": "continue", "OOMScoreAdjust": "200", "OnFailureJobMode": "replace", "OnSuccessJobMode": "fail", "Perpetual": "no", "PrivateDevices": "no", "PrivateIPC": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "PrivateUsers": "no", "ProcSubset": "all", "ProtectClock": "no", "ProtectControlGroups": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelLogs": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectProc": "default", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "ReloadSignal": "1", "RemainAfterExit": "no", "RemoveIPC": "no", "Requires": "app.slice basic.target", "RequiresMountsFor": "/run/user/2223/containers", "Restart": "no", "RestartKillSignal": "15", "RestartUSec": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "success", "RootDirectoryStartOnly": "no", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "RuntimeRandomizedExtraUSec": "0", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "app.slice", "SourcePath": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": "[not set]", "StartupIOWeight": "[not set]", "StateChangeTimestamp": "Mon 2025-07-07 20:17:07 EDT", "StateChangeTimestampMonotonic": "853213205", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "running", "SuccessAction": "none", "SyslogFacility": "3", "SyslogIdentifier": "quadlet-pod-container", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", 
"SyslogPriority": "30", "SystemCallErrorNumber": "2147483646", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "2", "TasksMax": "21900", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartFailureMode": "terminate", "TimeoutStartUSec": "1min 30s", "TimeoutStopFailureMode": "terminate", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "notify", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "generated", "UtmpMode": "init", "WantedBy": "default.target quadlet-pod-pod-pod.service", "Wants": "podman-user-wait-network-online.service", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "0", "WorkingDirectory": "!/home/user_quadlet_pod" } } TASK [fedora.linux_system_roles.podman : See if quadlet file exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:34 Monday 07 July 2025 20:17:28 -0400 (0:00:10.918) 0:00:56.624 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933827.1369631, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 8, "charset": "us-ascii", "checksum": "f0b5c8159fc3c65bf9310a371751609e4c1ba4c3", "ctime": 1751933826.5809596, "dev": 51713, "device_type": 0, "executable": false, "exists": true, "gid": 2223, "gr_name": "user_quadlet_pod", "inode": 41943287, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "text/plain", "mode": "0644", "mtime": 1751933826.324958, "nlink": 1, "path": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "pw_name": "user_quadlet_pod", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 230, "uid": 2223, "version": "2434089315", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": false, "xoth": false, "xusr": false } } TASK [fedora.linux_system_roles.podman : Parse quadlet file] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:39 Monday 07 July 2025 20:17:29 -0400 (0:00:00.366) 0:00:56.990 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Slurp quadlet file] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml:6 Monday 07 July 2025 20:17:29 -0400 (0:00:00.054) 0:00:57.045 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Parse quadlet file] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml:12 Monday 07 July 2025 20:17:29 -0400 (0:00:00.433) 0:00:57.479 *********** fatal: [managed-node2]: FAILED! 
=> { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result" } TASK [Debug3] ****************************************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:127 Monday 07 July 2025 20:17:29 -0400 (0:00:00.038) 0:00:57.517 *********** ok: [managed-node2] => { "changed": false, "cmd": "set -x\nset -o pipefail\nexec 1>&2\n#podman volume rm --all\n#podman network prune -f\npodman volume ls\npodman network ls\npodman secret ls\npodman container ls\npodman pod ls\npodman images\nsystemctl list-units | grep quadlet\nsystemctl list-unit-files | grep quadlet\nls -alrtF /etc/containers/systemd\n/usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log\n", "delta": "0:00:00.553045", "end": "2025-07-07 20:17:30.621028", "rc": 0, "start": "2025-07-07 20:17:30.067983" } STDERR: + set -o pipefail + exec + podman volume ls DRIVER VOLUME NAME + podman network ls NETWORK ID NAME DRIVER 2f259bab93aa podman bridge 0a842076c753 podman-default-kube-network bridge + podman secret ls ID NAME DRIVER CREATED UPDATED + podman container ls CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 8854ba6a76c4 46 seconds ago Up 47 seconds quadlet-pod-infra b345bf186b4d quay.io/libpod/testimage:20210610 /bin/busybox-extr... 41 seconds ago Up 41 seconds quadlet-pod-container + podman pod ls POD ID NAME STATUS CREATED INFRA ID # OF CONTAINERS e3b447f755bf quadlet-pod Running 46 seconds ago 8854ba6a76c4 2 + podman images REPOSITORY TAG IMAGE ID CREATED SIZE quay.io/libpod/testimage 20210610 9f9ec7f2fdef 4 years ago 7.99 MB + systemctl list-units + grep quadlet quadlet-pod-container.service loaded active running quadlet-pod-container.service quadlet-pod-pod-pod.service loaded active running quadlet-pod-pod-pod.service + systemctl list-unit-files + grep quadlet quadlet-pod-container.service generated - quadlet-pod-pod-pod.service generated - + ls -alrtF /etc/containers/systemd total 8 drwxr-xr-x. 9 root root 178 Jul 7 20:16 ../ -rw-r--r--. 1 root root 70 Jul 7 20:16 quadlet-pod-pod.pod -rw-r--r--. 1 root root 230 Jul 7 20:16 quadlet-pod-container.container drwxr-xr-x. 
2 root root 72 Jul 7 20:16 ./ + /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log quadlet-generator[65635]: Loading source unit file /etc/containers/systemd/quadlet-pod-container.container quadlet-generator[65635]: Loading source unit file /etc/containers/systemd/quadlet-pod-pod.pod ---quadlet-pod-container.service--- # # Ansible managed # # system_role:podman [Install] WantedBy=default.target [X-Container] Image=quay.io/libpod/testimage:20210610 ContainerName=quadlet-pod-container Pod=quadlet-pod-pod.pod Exec=/bin/busybox-extras httpd -f -p 80 [Unit] Wants=network-online.target After=network-online.target SourcePath=/etc/containers/systemd/quadlet-pod-container.container RequiresMountsFor=%t/containers BindsTo=quadlet-pod-pod-pod.service After=quadlet-pod-pod-pod.service [Service] Environment=PODMAN_SYSTEMD_UNIT=%n KillMode=mixed ExecStop=/usr/bin/podman rm -v -f -i --cidfile=%t/%N.cid ExecStopPost=-/usr/bin/podman rm -v -f -i --cidfile=%t/%N.cid Delegate=yes Type=notify NotifyAccess=all SyslogIdentifier=%N ExecStart=/usr/bin/podman run --name quadlet-pod-container --cidfile=%t/%N.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file %t/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ---quadlet-pod-pod-pod.service--- # # Ansible managed # # system_role:podman [X-Pod] PodName=quadlet-pod [Unit] Wants=network-online.target After=network-online.target SourcePath=/etc/containers/systemd/quadlet-pod-pod.pod RequiresMountsFor=%t/containers Wants=quadlet-pod-container.service Before=quadlet-pod-container.service [Service] SyslogIdentifier=%N ExecStart=/usr/bin/podman pod start --pod-id-file=%t/%N.pod-id ExecStop=/usr/bin/podman pod stop --pod-id-file=%t/%N.pod-id --ignore --time=10 ExecStopPost=/usr/bin/podman pod rm --pod-id-file=%t/%N.pod-id --ignore --force ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile=%t/%N.pid --pod-id-file=%t/%N.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod Environment=PODMAN_SYSTEMD_UNIT=%n Type=forking Restart=on-failure PIDFile=%t/%N.pid TASK [Check AVCs] ************************************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:146 Monday 07 July 2025 20:17:30 -0400 (0:00:00.904) 0:00:58.421 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "grep", "type=AVC", "/var/log/audit/audit.log" ], "delta": "0:00:00.005714", "end": "2025-07-07 20:17:30.973623", "failed_when_result": false, "rc": 1, "start": "2025-07-07 20:17:30.967909" } MSG: non-zero return code TASK [Dump journal] ************************************************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:151 Monday 07 July 2025 20:17:31 -0400 (0:00:00.349) 0:00:58.771 *********** fatal: [managed-node2]: FAILED! 
=> { "changed": false, "cmd": [ "journalctl", "-ex" ], "delta": "0:00:00.039678", "end": "2025-07-07 20:17:31.360118", "failed_when_result": true, "rc": 0, "start": "2025-07-07 20:17:31.320440" } STDOUT: Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Initializing event backend file" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=info msg="Setting parallel job count to 7" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=debug msg="Called cleanup.PersistentPostRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only 19edcb90781cf21e1139d2477490fcc52d381e9fa210ccb4a2abe674b2dc35a8)" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: 
time="2025-07-07T20:13:28-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time="2025-07-07T20:13:28-04:00" level=info msg="Received shutdown.Stop(), terminating!" PID=29671 Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Received SIGHUP Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Successfully parsed config Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v4 ip {} Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v6 ip {} Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: No configuration found stopping the sever Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Called cleanup.PersistentPreRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Setting custom database backend: \"sqlite\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=info msg="Using sqlite as database backend" Jul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="systemd-logind: Unknown object '/'." 
Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using graph driver overlay" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using graph root /home/podman_basic_user/.local/share/containers/storage" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using run root /run/user/3001/containers" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using tmp dir /run/user/3001/libpod/tmp" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using transient store: false" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that metacopy is not being used" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that native-diff is usable" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Initializing event backend file" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" Jul 
07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=info msg="Setting parallel job count to 7" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Called cleanup.PersistentPostRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=info msg="Received shutdown.Stop(), terminating!" PID=29684 Jul 07 20:13:28 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 83 and the job result is done. 
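The runtime probing above is routine: podman walks every OCI runtime listed in containers.conf, logs the ones whose binaries are not installed, and settles on the first usable one, /usr/bin/crun here. As a sketch (not part of the test run), the resolved runtime can be confirmed without a debug-level run via podman info; for the rootless entries above this would be run as podman_basic_user:

  # Print the OCI runtime podman actually resolved on this host
  podman info --format '{{.Host.OCIRuntime.Name}} -> {{.Host.OCIRuntime.Path}}'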
Jul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory Jul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory Jul 07 20:13:29 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory Jul 07 20:13:29 managed-node2 podman[29660]: Pods stopped: Jul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f Jul 07 20:13:29 managed-node2 podman[29660]: Pods removed: Jul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f Jul 07 20:13:29 managed-node2 podman[29660]: Secrets removed: Jul 07 20:13:29 managed-node2 podman[29660]: Volumes removed: Jul 07 20:13:29 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 84. Jul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 88. Jul 07 20:13:29 managed-node2 systemd[27808]: Started rootless-netns-281f12f7.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 92. Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:29 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:13:29 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state Jul 07 20:13:29 managed-node2 systemd[27808]: Started /usr/libexec/podman/aardvark-dns --config /run/user/3001/containers/networks/aardvark-dns -p 53 run. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 96. Jul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 100. 
Jul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 105. Jul 07 20:13:29 managed-node2 systemd[27808]: Started A template for running K8s workloads via podman-kube-play. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 70. Jul 07 20:13:29 managed-node2 podman[29660]: Pod: Jul 07 20:13:29 managed-node2 podman[29660]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4 Jul 07 20:13:29 managed-node2 podman[29660]: Container: Jul 07 20:13:29 managed-node2 podman[29660]: 98a702eb9e86b0efc7d3e6878bf2b4db5ac6ff3d0bc5383014d2958ce12dced5 Jul 07 20:13:29 managed-node2 sudo[29655]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:13:30 managed-node2 python3.9[29983]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:13:30 managed-node2 python3.9[30133]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:32 managed-node2 python3.9[30284]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:13:33 managed-node2 python3.9[30434]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:33 managed-node2 python3.9[30583]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
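The systemd-escape call logged above is how the role derives the template instance name for the kube unit; the escaped result is exactly the unit name that appears later when the service is enabled and started. Reproduced by hand with the same paths as this run:

  systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml
  # -> podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service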
Jul 07 20:13:34 managed-node2 podman[30766]: 2025-07-07 20:13:34.583080898 -0400 EDT m=+0.387686363 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:35 managed-node2 python3.9[30929]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:35 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:13:35 managed-node2 python3.9[31078]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:35 managed-node2 python3.9[31227]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:13:36 managed-node2 python3.9[31347]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd2.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933615.542873-13335-149986577229905/.source.yml _original_basename=.op5axps4 follow=False checksum=ce164467a3a112a82832f62e0fdfcaf3c7eecdd1 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:36 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.614627171 -0400 EDT m=+0.024918635 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:13:36 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice. ░░ Subject: A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully. ░░ ░░ The job identifier is 1383. 
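The ansible-containers.podman.podman_play invocation above drives everything that follows; the module builds a podman play kube command line, which is echoed later in the PODMAN-PLAY-KUBE debug output. A rough manual equivalent plus the obvious follow-up checks, assuming the same kube file path, would be:

  podman play kube --start=true /etc/containers/ansible-kubernetes.d/httpd2.yml
  podman pod ps --filter name=httpd2    # the pod created from the Kube YAML
  podman ps --pod --filter pod=httpd2   # infra container plus httpd2-httpd2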
Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.657483611 -0400 EDT m=+0.067774829 container create b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.663455669 -0400 EDT m=+0.073746755 pod create a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.690624127 -0400 EDT m=+0.100915321 container create f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, app=test, io.containers.autoupdate=registry, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7101] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/3) Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:36 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.667763108 -0400 EDT m=+0.078054303 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:36 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7230] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4) Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7287] device (veth0): carrier: link connected Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7289] device (podman1): carrier: link connected Jul 07 20:13:36 managed-node2 systemd-udevd[31526]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:13:36 managed-node2 systemd-udevd[31524]: Network interface NamePolicy= disabled on kernel command line. 
Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7661] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7667] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7674] device (podman1): Activation: starting connection 'podman1' (eac731d7-3726-4468-a790-cf1c7402dd92) Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7676] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7680] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7682] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7686] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1388. Jul 07 20:13:36 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1388. Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7916] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7918] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7923] device (podman1): Activation: successful, device activated. Jul 07 20:13:36 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run. ░░ Subject: A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully. ░░ ░░ The job identifier is 1454. Jul 07 20:13:36 managed-node2 aardvark-dns[31613]: starting aardvark on a child with pid 31614 Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Successfully parsed config Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v4 ip {"podman-default-kube-network": [10.89.0.1]} Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v6 ip {} Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Using the following upstream servers: [10.29.169.13:53, 10.29.170.12:53, 10.2.32.1:53] Jul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope. 
░░ Subject: A start job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully. ░░ ░░ The job identifier is 1458. Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/12/attach} Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : terminal_ctrl_fd: 12 Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : winsz read side: 16, winsz write side: 17 Jul 07 20:13:36 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully. ░░ ░░ The job identifier is 1463. Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : container PID: 31620 Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.960118446 -0400 EDT m=+0.370409669 container init b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.963833697 -0400 EDT m=+0.374124951 container start b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope. ░░ Subject: A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully. ░░ ░░ The job identifier is 1468. Jul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/11/attach} Jul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : terminal_ctrl_fd: 11 Jul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : winsz read side: 15, winsz write side: 16 Jul 07 20:13:37 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully. ░░ ░░ The job identifier is 1473. 
Jul 07 20:13:37 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container PID: 31625 Jul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.017137848 -0400 EDT m=+0.427429053 container init f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.020217845 -0400 EDT m=+0.430509047 container start f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.026166839 -0400 EDT m=+0.436457992 pod start a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pod: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 Container: f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time="2025-07-07T20:13:36-04:00" level=info msg="/usr/bin/podman filtering at log level debug" time="2025-07-07T20:13:36-04:00" level=debug msg="Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)" time="2025-07-07T20:13:36-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" time="2025-07-07T20:13:36-04:00" level=info msg="Using sqlite as database backend" time="2025-07-07T20:13:36-04:00" level=debug msg="Using graph driver overlay" time="2025-07-07T20:13:36-04:00" level=debug msg="Using graph root /var/lib/containers/storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Using run root /run/containers/storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" time="2025-07-07T20:13:36-04:00" level=debug msg="Using tmp dir /run/libpod" time="2025-07-07T20:13:36-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" time="2025-07-07T20:13:36-04:00" level=debug msg="Using transient store: false" time="2025-07-07T20:13:36-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that metacopy is being used" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that native-diff is not being used" time="2025-07-07T20:13:36-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has 
CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" time="2025-07-07T20:13:36-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" time="2025-07-07T20:13:36-04:00" level=debug msg="Initializing event backend journald" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" time="2025-07-07T20:13:36-04:00" level=info msg="Setting parallel job count to 7" time="2025-07-07T20:13:36-04:00" level=debug msg="Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}" time="2025-07-07T20:13:36-04:00" level=debug msg="Successfully loaded 2 networks" time="2025-07-07T20:13:36-04:00" level=debug msg="Pod using bridge network mode" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="no command or entrypoint provided, and no CMD or ENTRYPOINT from image: defaulting to empty string" time="2025-07-07T20:13:36-04:00" level=debug msg="using systemd mode: false" time="2025-07-07T20:13:36-04:00" level=debug msg="setting container name a89535868ec0-infra" time="2025-07-07T20:13:36-04:00" level=debug msg="Loading seccomp profile from \"/usr/share/containers/seccomp.json\"" 
time="2025-07-07T20:13:36-04:00" level=debug msg="Allocated lock 1 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that idmapped mounts for overlay are supported" time="2025-07-07T20:13:36-04:00" level=debug msg="Created container \"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\" has work directory \"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\" has run directory \"/run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Looking up image \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Normalized platform linux/amd64 to {amd64 linux [] }" time="2025-07-07T20:13:36-04:00" level=debug msg="Trying \"quay.io/libpod/testimage:20210610\" ..." time="2025-07-07T20:13:36-04:00" level=debug msg="parsed reference into \"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Pulling image quay.io/libpod/testimage:20210610 (policy: missing)" time="2025-07-07T20:13:36-04:00" level=debug msg="Looking up image \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Normalized platform linux/amd64 to {amd64 linux [] }" time="2025-07-07T20:13:36-04:00" level=debug msg="Trying \"quay.io/libpod/testimage:20210610\" ..." 
time="2025-07-07T20:13:36-04:00" level=debug msg="parsed reference into \"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Looking up image \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Normalized platform linux/amd64 to {amd64 linux [] }" time="2025-07-07T20:13:36-04:00" level=debug msg="Trying \"quay.io/libpod/testimage:20210610\" ..." time="2025-07-07T20:13:36-04:00" level=debug msg="parsed reference into \"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="using systemd mode: false" time="2025-07-07T20:13:36-04:00" level=debug msg="adding container to pod httpd2" time="2025-07-07T20:13:36-04:00" level=debug msg="setting container name httpd2-httpd2" 
time="2025-07-07T20:13:36-04:00" level=debug msg="Loading seccomp profile from \"/usr/share/containers/seccomp.json\"" time="2025-07-07T20:13:36-04:00" level=info msg="Sysctl net.ipv4.ping_group_range=0 0 ignored in containers.conf, since Network Namespace set to host" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /proc" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /dev" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /dev/pts" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /dev/mqueue" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /sys" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /sys/fs/cgroup" time="2025-07-07T20:13:36-04:00" level=debug msg="Allocated lock 2 for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Created container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\" has work directory \"/var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\" has run directory \"/run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Strongconnecting node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="Pushed b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 onto stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Finishing node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2. Popped b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 off stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Strongconnecting node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" time="2025-07-07T20:13:36-04:00" level=debug msg="Pushed f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 onto stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Finishing node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06. Popped f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 off stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Made network namespace at /run/netns/netns-3dcd885d-1b51-2e38-72ff-33596f02c329 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" [DEBUG netavark::network::validation] Validating network namespace... [DEBUG netavark::commands::setup] Setting up... 
[INFO netavark::firewall] Using iptables firewall driver [DEBUG netavark::network::bridge] Setup network podman-default-kube-network [DEBUG netavark::network::bridge] Container interface name: eth0 with IP addresses [10.89.0.2/24] [DEBUG netavark::network::bridge] Bridge name: podman1 with IP addresses [10.89.0.1/24] [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.ip_forward to 1 [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/podman1/rp_filter to 2 time="2025-07-07T20:13:36-04:00" level=debug msg="Created root filesystem for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge" [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv6/conf/eth0/autoconf to 0 [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/arp_notify to 1 [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/rp_filter to 2 [INFO netavark::network::netlink] Adding route (dest: 0.0.0.0/0 ,gw: 10.89.0.1, metric 100) [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-4B9D9135B29BA created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_2 created on table filter [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_3 created on table filter [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_INPUT created on table filter [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_FORWARD created on table filter [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -j ACCEPT created on table nat and chain NETAVARK-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule ! 
-d 224.0.0.0/4 -j MASQUERADE created on table nat and chain NETAVARK-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j NETAVARK-4B9D9135B29BA created on table nat and chain POSTROUTING [DEBUG netavark::firewall::varktables::helpers] rule -p udp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT [DEBUG netavark::firewall::varktables::helpers] rule -p tcp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT [DEBUG netavark::firewall::varktables::helpers] rule -m conntrack --ctstate INVALID -j DROP created on table filter and chain NETAVARK_FORWARD [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT created on table filter and chain NETAVARK_FORWARD [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j ACCEPT created on table filter and chain NETAVARK_FORWARD [DEBUG netavark::firewall::firewalld] Adding firewalld rules for network 10.89.0.0/24 [DEBUG netavark::firewall::firewalld] Adding subnet 10.89.0.0/24 to zone trusted as source [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.conf.podman1.route_localnet to 1 [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-SETMARK created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-MASQ created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-DN-4B9D9135B29BA created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-DNAT created on table nat [DEBUG netavark::firewall::varktables::helpers] rule -j MARK --set-xmark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-SETMARK [DEBUG netavark::firewall::varktables::helpers] rule -j MASQUERADE -m comment --comment 'netavark portfw masq mark' -m mark --mark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-MASQ [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 10.89.0.0/24 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 127.0.0.1 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -j DNAT -p tcp --to-destination 10.89.0.2:80 --destination-port 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-DN-4B9D9135B29BA -p tcp --dport 15002 -m comment --comment 'dnat name: podman-default-kube-network id: b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2' created on table nat and chain NETAVARK-HOSTPORT-DNAT [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain PREROUTING [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain OUTPUT [DEBUG netavark::dns::aardvark] Spawning aardvark server [DEBUG netavark::dns::aardvark] start aardvark-dns: ["systemd-run", "-q", "--scope", "/usr/libexec/podman/aardvark-dns", "--config", "/run/containers/networks/aardvark-dns", "-p", "53", "run"] [DEBUG netavark::commands::setup] { "podman-default-kube-network": StatusBlock { dns_search_domains: Some( [ "dns.podman", ], ), dns_server_ips: Some( [ 10.89.0.1, ], ), interfaces: Some( { "eth0": 
NetInterface { mac_address: "ce:5c:7c:33:0d:65", subnets: Some( [ NetAddress { gateway: Some( 10.89.0.1, ), ipnet: 10.89.0.2/24, }, ], ), }, }, ), }, } [DEBUG netavark::commands::setup] Setup complete time="2025-07-07T20:13:36-04:00" level=debug msg="/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts" time="2025-07-07T20:13:36-04:00" level=debug msg="Setting Cgroups for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="reading hooks from /usr/share/containers/oci/hooks.d" time="2025-07-07T20:13:36-04:00" level=debug msg="Workdir \"/\" resolved to host path \"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Created OCI spec for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/config.json" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="/usr/bin/conmon messages will be logged to syslog" time="2025-07-07T20:13:36-04:00" level=debug msg="running conmon: /usr/bin/conmon" args="[--api-version 1 -c b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -u b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata -p /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/pidfile -n a89535868ec0-infra --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime 
--exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2]" time="2025-07-07T20:13:36-04:00" level=info msg="Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope" time="2025-07-07T20:13:36-04:00" level=debug msg="Received: 31620" time="2025-07-07T20:13:36-04:00" level=info msg="Got Conmon PID as 31618" time="2025-07-07T20:13:36-04:00" level=debug msg="Created container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 in OCI runtime" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding nameserver(s) from network status of '[\"10.89.0.1\"]'" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding search domain(s) from network status of '[\"dns.podman\"]'" time="2025-07-07T20:13:36-04:00" level=debug msg="Starting container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 with command [/catatonit -P]" time="2025-07-07T20:13:36-04:00" level=debug msg="Started container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="overlay: mount_data=lowerdir=/var/lib/containers/storage/overlay/l/BSXLDW6S4QQFLDJH6Z45ODLX6A,upperdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/diff,workdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/work,nodev,metacopy=on,context=\"system_u:object_r:container_file_t:s0:c263,c753\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Mounted container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\" at \"/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Created root filesystem for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged" time="2025-07-07T20:13:36-04:00" level=debug msg="/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts" time="2025-07-07T20:13:36-04:00" level=debug msg="Setting Cgroups for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" time="2025-07-07T20:13:36-04:00" level=debug msg="reading hooks from /usr/share/containers/oci/hooks.d" time="2025-07-07T20:13:36-04:00" level=debug msg="Workdir \"/var/www\" resolved to a volume or mount" time="2025-07-07T20:13:36-04:00" level=debug msg="Created OCI spec for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/config.json" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup path 
machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="/usr/bin/conmon messages will be logged to syslog" time="2025-07-07T20:13:36-04:00" level=debug msg="running conmon: /usr/bin/conmon" args="[--api-version 1 -c f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -u f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata -p /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/pidfile -n httpd2-httpd2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06]" time="2025-07-07T20:13:36-04:00" level=info msg="Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope" time="2025-07-07T20:13:37-04:00" level=debug msg="Received: 31625" time="2025-07-07T20:13:37-04:00" level=info msg="Got Conmon PID as 31623" time="2025-07-07T20:13:37-04:00" level=debug msg="Created container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 in OCI runtime" time="2025-07-07T20:13:37-04:00" level=debug msg="Starting container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 with command [/bin/busybox-extras httpd -f -p 80]" time="2025-07-07T20:13:37-04:00" level=debug msg="Started container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" 
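The netavark rules set up earlier publish the container's port 80 on host port 15002, and the container command above is just busybox httpd listening on 80, so the workload can be exercised from the host. A minimal check, reusing the port and chain names from the rules logged above:

  iptables -t nat -S NETAVARK-HOSTPORT-DNAT   # should reference NETAVARK-DN-4B9D9135B29BA for --dport 15002
  curl -s http://127.0.0.1:15002/             # served by /bin/busybox-extras httpd inside httpd2-httpd2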
time="2025-07-07T20:13:37-04:00" level=debug msg="Called kube.PersistentPostRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)" time="2025-07-07T20:13:37-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0 Jul 07 20:13:37 managed-node2 python3.9[31775]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:13:37 managed-node2 systemd[1]: Reloading. Jul 07 20:13:37 managed-node2 systemd-rc-local-generator[31792]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:38 managed-node2 python3.9[31958]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None Jul 07 20:13:38 managed-node2 systemd[1]: Reloading. Jul 07 20:13:38 managed-node2 systemd-rc-local-generator[31978]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:39 managed-node2 python3.9[32143]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:13:39 managed-node2 systemd[1]: Created slice Slice /system/podman-kube. ░░ Subject: A start job for unit system-podman\x2dkube.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit system-podman\x2dkube.slice has finished successfully. ░░ ░░ The job identifier is 1479. Jul 07 20:13:39 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play... ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution. ░░ ░░ The job identifier is 1478. Jul 07 20:13:39 managed-node2 podman[32147]: 2025-07-07 20:13:39.145431853 -0400 EDT m=+0.025527029 pod stop a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:46 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 podman[32147]: time="2025-07-07T20:13:49-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL" Jul 07 20:13:49 managed-node2 systemd[1]: libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state. 
Jul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container 31625 exited with status 137 Jul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice/libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope/container/memory.events Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.174232474 -0400 EDT m=+10.054327720 container died f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Setting custom database backend: \"sqlite\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Using sqlite as database backend" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph driver overlay" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph root /var/lib/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using run root /run/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using tmp dir /run/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using transient store: false" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that metacopy is being used" Jul 07 20:13:49 
managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that native-diff is not being used" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Initializing event backend journald" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Setting parallel job count to 7" Jul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount has successfully entered the 'dead' state. 
Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.219658211 -0400 EDT m=+10.099753380 container cleanup f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test) Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Received shutdown.Stop(), terminating!" PID=32158 Jul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.241852628 -0400 EDT m=+10.121948152 container died b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra) Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Received SIGHUP Jul 07 20:13:49 managed-node2 systemd[1]: run-rf8a9b32703c44fe9919a21200707a783.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-rf8a9b32703c44fe9919a21200707a783.scope has successfully entered the 'dead' state. 
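Editorial note, not part of the captured journal: the pod stop above gives each container 10 seconds to exit on SIGTERM before podman falls back to SIGKILL, which is why conmon reports exit status 137 (128 + 9, the SIGKILL signal number). A small sketch of the same behaviour; the container name is taken from this log and the longer timeout is purely illustrative:

# Stop with a 10-second grace period: SIGTERM first, SIGKILL after the timeout.
podman stop --time 10 httpd2-httpd2
# Hypothetical: allow a longer grace period for containers that shut down slowly.
podman stop --time 30 httpd2-httpd2
# After a SIGKILL stop the recorded exit code is 137 (128 + 9).
podman inspect --format '{{.State.ExitCode}}' httpd2-httpd2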
Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Successfully parsed config Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v4 ip {} Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v6 ip {} Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: No configuration found stopping the sever Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Setting custom database backend: \"sqlite\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Using sqlite as database backend" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph driver overlay" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph root /var/lib/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using run root /run/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using tmp dir /run/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using transient store: false" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that metacopy is being used" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that native-diff is not being used" Jul 07 20:13:49 managed-node2 
/usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Initializing event backend journald" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Setting parallel job count to 7" Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.2949] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jul 07 20:13:49 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1551. Jul 07 20:13:49 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. 
░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1551. Jul 07 20:13:49 managed-node2 systemd[1]: run-netns-netns\x2d3dcd885d\x2d1b51\x2d2e38\x2d72ff\x2d33596f02c329.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d3dcd885d\x2d1b51\x2d2e38\x2d72ff\x2d33596f02c329.mount has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.470731214 -0400 EDT m=+10.350826660 container cleanup b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Received shutdown.Stop(), terminating!" PID=32170 Jul 07 20:13:49 managed-node2 systemd[1]: Stopping libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope... ░░ Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution. ░░ ░░ The job identifier is 1618. Jul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: Stopped libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope. 
░░ Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished. ░░ ░░ The job identifier is 1618 and the job result is done. Jul 07 20:13:49 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice. ░░ Subject: A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished. ░░ ░░ The job identifier is 1617 and the job result is done. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.543096205 -0400 EDT m=+10.423191413 container remove f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.569252202 -0400 EDT m=+10.449347410 container remove b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:49 managed-node2 systemd[1]: machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: Failed to open /run/systemd/transient/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: No such file or directory Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577427834 -0400 EDT m=+10.457523002 pod remove a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:49 managed-node2 podman[32147]: Pods stopped: Jul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 Jul 07 20:13:49 managed-node2 podman[32147]: Pods removed: Jul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 Jul 07 20:13:49 managed-node2 podman[32147]: Secrets removed: Jul 07 20:13:49 managed-node2 podman[32147]: Volumes removed: Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577733569 -0400 EDT m=+10.457828934 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.601543415 -0400 EDT m=+10.481638618 container create 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice. 
░░ Subject: A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully. ░░ ░░ The job identifier is 1619. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.643353432 -0400 EDT m=+10.523448629 container create 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.649232898 -0400 EDT m=+10.529328325 pod create d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675477572 -0400 EDT m=+10.555572747 container create 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, created_at=2021-06-10T18:55:36Z) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675792512 -0400 EDT m=+10.555887719 container restart 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.651020296 -0400 EDT m=+10.531115614 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully. ░░ ░░ The job identifier is 1623. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.731153306 -0400 EDT m=+10.611248537 container init 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.734097189 -0400 EDT m=+10.614192506 container start 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7517] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/5) Jul 07 20:13:49 managed-node2 systemd-udevd[32184]: Network interface NamePolicy= disabled on kernel command line. 
Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:49 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:13:49 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:13:49 managed-node2 systemd-udevd[32189]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7610] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/6) Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7658] device (veth0): carrier: link connected Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7662] device (podman1): carrier: link connected Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7742] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7747] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7759] device (podman1): Activation: starting connection 'podman1' (9a09baee-577d-45df-991f-e577871fe999) Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7761] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7766] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7769] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7773] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7875] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7879] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7899] device (podman1): Activation: successful, device activated. Jul 07 20:13:49 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run. ░░ Subject: A start job for unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully. ░░ ░░ The job identifier is 1627. Jul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully. ░░ ░░ The job identifier is 1631. 
Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.929395798 -0400 EDT m=+10.809491054 container init 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.932349651 -0400 EDT m=+10.812444917 container start 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully. ░░ ░░ The job identifier is 1636. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.975052109 -0400 EDT m=+10.855147428 container init 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.978227355 -0400 EDT m=+10.858322532 container start 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.984086055 -0400 EDT m=+10.864181258 pod start d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:13:49 managed-node2 systemd[1]: Started A template for running K8s workloads via podman-kube-play. ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished successfully. ░░ ░░ The job identifier is 1478. Jul 07 20:13:49 managed-node2 podman[32147]: Pod: Jul 07 20:13:49 managed-node2 podman[32147]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 Jul 07 20:13:49 managed-node2 podman[32147]: Container: Jul 07 20:13:49 managed-node2 podman[32147]: 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 Jul 07 20:13:50 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:13:51 managed-node2 python3.9[32468]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:52 managed-node2 python3.9[32619]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:13:53 managed-node2 python3.9[32769]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:53 managed-node2 python3.9[32918]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:54 managed-node2 podman[33098]: 2025-07-07 20:13:54.593475273 -0400 EDT m=+0.280680463 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:55 managed-node2 python3.9[33262]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:55 managed-node2 python3.9[33411]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:55 managed-node2 python3.9[33560]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:13:56 managed-node2 python3.9[33680]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd3.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933635.6006646-13961-107071336953239/.source.yml _original_basename=._73jk67j follow=False checksum=5b3685de46cacb0a0661419a5a5898cbb3cf431c backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:56 managed-node2 python3.9[33829]: ansible-containers.podman.podman_play Invoked with state=started 
kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.647468979 -0400 EDT m=+0.017382508 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:13:56 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice. ░░ Subject: A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully. ░░ ░░ The job identifier is 1641. Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.687774841 -0400 EDT m=+0.057688390 container create f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.693778889 -0400 EDT m=+0.063692415 pod create 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.718042148 -0400 EDT m=+0.087955671 container create d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry) Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:13:56 managed-node2 kernel: veth1: entered allmulticast mode Jul 07 20:13:56 managed-node2 kernel: veth1: entered promiscuous mode Jul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7464] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/7) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.695533824 -0400 EDT m=+0.065447531 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state Jul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7489] device (veth1): carrier: link connected Jul 07 20:13:56 managed-node2 systemd-udevd[33860]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope. 
░░ Subject: A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully. ░░ ░░ The job identifier is 1646. Jul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully. ░░ ░░ The job identifier is 1651. Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.874054258 -0400 EDT m=+0.243967914 container init f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.877423946 -0400 EDT m=+0.247337594 container start f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope. ░░ Subject: A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully. ░░ ░░ The job identifier is 1656. Jul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully. ░░ ░░ The job identifier is 1661. 
Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.931183676 -0400 EDT m=+0.301097308 container init d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.93420031 -0400 EDT m=+0.304113971 container start d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.940215927 -0400 EDT m=+0.310129474 pod start 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:13:57 managed-node2 python3.9[34065]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:13:57 managed-node2 systemd[1]: Reloading. Jul 07 20:13:57 managed-node2 systemd-rc-local-generator[34083]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:58 managed-node2 python3.9[34248]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None Jul 07 20:13:58 managed-node2 systemd[1]: Reloading. Jul 07 20:13:58 managed-node2 systemd-rc-local-generator[34268]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:59 managed-node2 python3.9[34433]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:13:59 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play... ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution. ░░ ░░ The job identifier is 1666. Jul 07 20:13:59 managed-node2 podman[34437]: 2025-07-07 20:13:59.128818064 -0400 EDT m=+0.031043200 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:13:59 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: time="2025-07-07T20:14:09-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL" Jul 07 20:14:09 managed-node2 systemd[1]: libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.161891677 -0400 EDT m=+10.064117231 container died d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z) Jul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.208209429 -0400 EDT m=+10.110434520 container cleanup d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.228424072 -0400 EDT m=+10.130649401 container died f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra) Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left allmulticast mode Jul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left promiscuous mode Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:14:09 managed-node2 systemd[1]: run-netns-netns\x2db10132db\x2d5af1\x2d0f8c\x2d38ab\x2d1e8eaa97e6f2.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2db10132db\x2d5af1\x2d0f8c\x2d38ab\x2d1e8eaa97e6f2.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.330607359 -0400 EDT m=+10.232832448 container cleanup f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: Stopped libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope. ░░ Subject: A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished. ░░ ░░ The job identifier is 1740 and the job result is done. Jul 07 20:14:09 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice. ░░ Subject: A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished. ░░ ░░ The job identifier is 1739 and the job result is done. 
Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.33777977 -0400 EDT m=+10.240004889 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.341042664 -0400 EDT m=+10.243267751 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.366974147 -0400 EDT m=+10.269199273 container remove d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.392885411 -0400 EDT m=+10.295110535 container remove f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.400728494 -0400 EDT m=+10.302953580 pod remove 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 podman[34437]: Pods stopped: Jul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 Jul 07 20:14:09 managed-node2 podman[34437]: Pods removed: Jul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 Jul 07 20:14:09 managed-node2 podman[34437]: Secrets removed: Jul 07 20:14:09 managed-node2 podman[34437]: Volumes removed: Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.40084298 -0400 EDT m=+10.303068230 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.421632285 -0400 EDT m=+10.323857401 container create eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice. 
░░ Subject: A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished successfully. ░░ ░░ The job identifier is 1741. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.456874206 -0400 EDT m=+10.359099322 container create e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.463729321 -0400 EDT m=+10.365954523 pod create 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.466690532 -0400 EDT m=+10.368915893 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495205758 -0400 EDT m=+10.397430876 container create 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, created_at=2021-06-10T18:55:36Z) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495540708 -0400 EDT m=+10.397765831 container restart eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully. ░░ ░░ The job identifier is 1745. 
Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.541938993 -0400 EDT m=+10.444164111 container init eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.54556115 -0400 EDT m=+10.447786446 container start eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:14:09 managed-node2 kernel: veth1: entered allmulticast mode Jul 07 20:14:09 managed-node2 kernel: veth1: entered promiscuous mode Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state Jul 07 20:14:09 managed-node2 NetworkManager[644]: [1751933649.5632] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/8) Jul 07 20:14:09 managed-node2 NetworkManager[644]: [1751933649.5684] device (veth1): carrier: link connected Jul 07 20:14:09 managed-node2 systemd-udevd[34477]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully. ░░ ░░ The job identifier is 1749. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.678444807 -0400 EDT m=+10.580670010 container init e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.681382862 -0400 EDT m=+10.583608134 container start e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully. ░░ ░░ The job identifier is 1754. 
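The veth1/podman1 kernel and NetworkManager messages above come from the pod's interface joining the default kube network. If you want to look at that network on the host, something like the following works; the network and bridge names are the ones appearing in this log, everything else is a generic inspection command, not something the role itself runs.

  podman network inspect podman-default-kube-network   # subnet, gateway, bridge name, driver
  ip -br link show master podman1                      # veths currently attached to the podman1 bridge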
Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.728795591 -0400 EDT m=+10.631020730 container init 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, app=test) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.73177019 -0400 EDT m=+10.633995385 container start 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.737566302 -0400 EDT m=+10.639791423 pod start 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 podman[34437]: Pod: Jul 07 20:14:09 managed-node2 podman[34437]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 Jul 07 20:14:09 managed-node2 podman[34437]: Container: Jul 07 20:14:09 managed-node2 podman[34437]: 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 Jul 07 20:14:09 managed-node2 systemd[1]: Started A template for running K8s workloads via podman-kube-play. ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished successfully. ░░ ░░ The job identifier is 1666. Jul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:10 managed-node2 sudo[34704]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cflwckudrftyvniytbtokrawwncefyyk ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933650.260143-14379-210639111977054/AnsiballZ_command.py' Jul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:10 managed-node2 python3.9[34706]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:10 managed-node2 systemd[27808]: Started podman-34715.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 110. 
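The pod inspect just above and the entries that follow are the test's health checks: pod state via podman pod inspect, then the published ports via plain HTTP GETs. A rough manual equivalent is sketched here; the pod names, ports and UID 3001 come from this run, the sh -c wrapper mirrors how Ansible's become runs the rootless command in this log, and curl merely stands in for the ansible.legacy.uri calls.

  # rootless pod owned by podman_basic_user
  sudo -u podman_basic_user /bin/sh -c \
      'XDG_RUNTIME_DIR=/run/user/3001 podman pod inspect httpd1 --format "{{.State}}"'
  # root-owned pods
  podman pod inspect httpd2 --format '{{.State}}'
  podman pod inspect httpd3 --format '{{.State}}'
  # the kube YAMLs publish 15001-15003; the test fetches index.txt from each
  curl -s http://localhost:15001/index.txt
  curl -s http://localhost:15002/index.txt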
Jul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:10 managed-node2 python3.9[34872]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:11 managed-node2 python3.9[35029]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:11 managed-node2 sudo[35186]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utoemynxahtksrgkxmppktxcnibfjzhy ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933651.5642703-14440-50388675366557/AnsiballZ_command.py' Jul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:11 managed-node2 python3.9[35188]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:12 managed-node2 python3.9[35340]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:12 managed-node2 python3.9[35492]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:13 managed-node2 python3.9[35644]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:13 managed-node2 python3.9[35795]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True 
force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:14 managed-node2 python3.9[35945]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd1-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:14 managed-node2 python3.9[36095]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd2-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:14 managed-node2 python3.9[36245]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd3-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:17 managed-node2 python3.9[36544]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:18 managed-node2 python3.9[36699]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:21 managed-node2 python3.9[36850]: ansible-ansible.legacy.dnf Invoked with name=['firewalld'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:23 managed-node2 python3.9[37000]: ansible-systemd Invoked with name=firewalld masked=False daemon_reload=False daemon_reexec=False scope=system no_block=False state=None enabled=None force=None Jul 07 20:14:23 managed-node2 python3.9[37151]: ansible-ansible.legacy.systemd Invoked with name=firewalld enabled=True state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None Jul 07 20:14:24 managed-node2 python3.9[37302]: ansible-fedora.linux_system_roles.firewall_lib Invoked with port=['15001-15003/tcp'] permanent=True runtime=True state=enabled __report_changed=True online=True service=[] source_port=[] forward_port=[] rich_rule=[] source=[] interface=[] interface_pci_id=[] icmp_block=[] timeout=0 ipset_entries=[] protocol=[] helper_module=[] destination=[] includes=[] firewalld_conf=None masquerade=None icmp_block_inversion=None target=None zone=None set_default_zone=None ipset=None ipset_type=None description=None short=None Jul 07 
20:14:26 managed-node2 python3.9[37451]: ansible-ansible.legacy.dnf Invoked with name=['python3-libselinux', 'python3-policycoreutils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:27 managed-node2 python3.9[37601]: ansible-ansible.legacy.dnf Invoked with name=['grubby'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:29 managed-node2 python3.9[37751]: ansible-ansible.legacy.dnf Invoked with name=['policycoreutils-python-utils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:30 managed-node2 python3.9[37901]: ansible-setup Invoked with filter=['ansible_selinux'] gather_subset=['all'] gather_timeout=10 fact_path=/etc/ansible/facts.d Jul 07 20:14:32 managed-node2 python3.9[38089]: ansible-fedora.linux_system_roles.local_seport Invoked with ports=['15001-15003'] proto=tcp setype=http_port_t state=present local=False ignore_selinux_state=False reload=True Jul 07 20:14:33 managed-node2 python3.9[38238]: ansible-fedora.linux_system_roles.selinux_modules_facts Invoked Jul 07 20:14:37 managed-node2 python3.9[38387]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None Jul 07 20:14:38 managed-node2 python3.9[38537]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:38 managed-node2 python3.9[38688]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:39 managed-node2 python3.9[38838]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:40 managed-node2 python3.9[38988]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service 
/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:40 managed-node2 python3.9[39138]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl enable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None Jul 07 20:14:41 managed-node2 python3.9[39287]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1 state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:41 managed-node2 python3.9[39436]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1-create state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:42 managed-node2 sudo[39585]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dteqtauzgahpdwlqmxqoqvigwdlcbwgx ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933682.0866897-15498-43758369682888/AnsiballZ_podman_image.py' Jul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39588.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 114. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39596.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 118. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39604.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 122. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39611.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 126. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39618.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 130. 
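Two small pieces of plumbing show up in the entries above: the kube YAML path is turned into a systemd template instance name with systemd-escape, and lingering is enabled so the rootless user's systemd instance keeps running outside a login session. By hand, with the path and user from this run, that looks like:

  systemd-escape --template podman-kube@.service \
      /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml
  # -> podman-kube@-home-podman_basic_user-.config-containers-ansible\x2dkubernetes.d-httpd1.yml.service
  loginctl enable-linger podman_basic_user
  ls /var/lib/systemd/linger/   # the task above uses this file as its creates= guard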
Jul 07 20:14:43 managed-node2 systemd[27808]: Started podman-39626.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 134. Jul 07 20:14:43 managed-node2 sudo[39585]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:43 managed-node2 python3.9[39782]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:43 managed-node2 python3.9[39933]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d state=directory owner=podman_basic_user group=3001 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:44 managed-node2 python3.9[40082]: ansible-ansible.legacy.stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:14:44 managed-node2 python3.9[40157]: ansible-ansible.legacy.file Invoked with owner=podman_basic_user group=3001 mode=0644 dest=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _original_basename=.3ieew216 recurse=False state=file path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:44 managed-node2 sudo[40306]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ustvjrkhcohlxhhxarkriiiprsdnnhal ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933684.6664407-15601-193248781752908/AnsiballZ_podman_play.py' Jul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:44 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:14:45 managed-node2 systemd[27808]: Started podman-40315.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 138. 
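The rootless kube play logged next exits 125 with the error adding pod to state: name "httpd1" is in use: pod already exists, because a pod with that name is already registered in the user's storage from the earlier run. Outside of this test, the conflict can be confirmed and cleared with something like the following, run in the rootless user's session; podman kube play --down is the same teardown command the role itself uses later in this log.

  podman pod ps --filter name=httpd1               # is a pod with that name already registered?
  podman pod inspect httpd1 --format '{{.State}}'
  # either tear it down via the same kube file ...
  podman kube play --down /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml
  # ... or remove the pod directly
  podman pod rm -f httpd1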
Jul 07 20:14:45 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 142. Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman play kube --start=true --log-level=debug /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time="2025-07-07T20:14:45-04:00" level=info msg="/bin/podman filtering at log level debug" time="2025-07-07T20:14:45-04:00" level=debug msg="Called kube.PersistentPreRunE(/bin/podman play kube --start=true --log-level=debug /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml)" time="2025-07-07T20:14:45-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" time="2025-07-07T20:14:45-04:00" level=info msg="Using sqlite as database backend" time="2025-07-07T20:14:45-04:00" level=debug msg="systemd-logind: Unknown object '/'." time="2025-07-07T20:14:45-04:00" level=debug msg="Using graph driver overlay" time="2025-07-07T20:14:45-04:00" level=debug msg="Using graph root /home/podman_basic_user/.local/share/containers/storage" time="2025-07-07T20:14:45-04:00" level=debug msg="Using run root /run/user/3001/containers" time="2025-07-07T20:14:45-04:00" level=debug msg="Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod" time="2025-07-07T20:14:45-04:00" level=debug msg="Using tmp dir /run/user/3001/libpod/tmp" time="2025-07-07T20:14:45-04:00" level=debug msg="Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes" time="2025-07-07T20:14:45-04:00" level=debug msg="Using transient store: false" time="2025-07-07T20:14:45-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that metacopy is not being used" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that native-diff is usable" time="2025-07-07T20:14:45-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false" time="2025-07-07T20:14:45-04:00" level=debug msg="Initializing event backend file" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" 
time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" time="2025-07-07T20:14:45-04:00" level=info msg="Setting parallel job count to 7" time="2025-07-07T20:14:45-04:00" level=debug msg="Successfully loaded network podman-default-kube-network: &{podman-default-kube-network f726a0dfc720eef9b785c3acdef2ddc0ef169e999e9185270f7b5fdceae44256 bridge podman1 2025-07-07 20:13:16.261934543 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}" time="2025-07-07T20:14:45-04:00" level=debug msg="Successfully loaded 2 networks" time="2025-07-07T20:14:45-04:00" level=debug msg="Pod using bridge network mode" time="2025-07-07T20:14:45-04:00" level=debug msg="Created cgroup path user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice for parent user.slice and name libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c" time="2025-07-07T20:14:45-04:00" level=debug msg="Created cgroup user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice" time="2025-07-07T20:14:45-04:00" level=debug msg="Got pod cgroup as user.slice/user-3001.slice/user@3001.service/user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice" Error: adding pod to state: name "httpd1" is in use: pod already exists time="2025-07-07T20:14:45-04:00" level=debug msg="Shutting down engines" time="2025-07-07T20:14:45-04:00" level=info msg="Received shutdown.Stop(), terminating!" 
PID=40315 Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125 Jul 07 20:14:45 managed-node2 sudo[40306]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:46 managed-node2 python3.9[40471]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:14:46 managed-node2 python3.9[40621]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:47 managed-node2 python3.9[40772]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:49 managed-node2 python3.9[40922]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:49 managed-node2 python3.9[41071]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:50 managed-node2 podman[41251]: 2025-07-07 20:14:50.30172649 -0400 EDT m=+0.335741630 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:14:50 managed-node2 python3.9[41415]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:51 managed-node2 python3.9[41566]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:51 managed-node2 python3.9[41715]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:14:51 managed-node2 python3.9[41790]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd2.yml _original_basename=.7tnd0tsm recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd2.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play Invoked with 
state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:14:52 managed-node2 podman[41946]: 2025-07-07 20:14:52.281267633 -0400 EDT m=+0.019255481 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:14:52 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice. ░░ Subject: A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully. ░░ ░░ The job identifier is 1759. Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time="2025-07-07T20:14:52-04:00" level=info msg="/usr/bin/podman filtering at log level debug" time="2025-07-07T20:14:52-04:00" level=debug msg="Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)" time="2025-07-07T20:14:52-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" time="2025-07-07T20:14:52-04:00" level=info msg="Using sqlite as database backend" time="2025-07-07T20:14:52-04:00" level=debug msg="Using graph driver overlay" time="2025-07-07T20:14:52-04:00" level=debug msg="Using graph root /var/lib/containers/storage" time="2025-07-07T20:14:52-04:00" level=debug msg="Using run root /run/containers/storage" time="2025-07-07T20:14:52-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" time="2025-07-07T20:14:52-04:00" level=debug msg="Using tmp dir /run/libpod" time="2025-07-07T20:14:52-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" time="2025-07-07T20:14:52-04:00" level=debug msg="Using transient store: false" time="2025-07-07T20:14:52-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that metacopy is being used" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that native-diff is not being used" time="2025-07-07T20:14:52-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" time="2025-07-07T20:14:52-04:00" level=debug msg="backingFs=xfs, 
projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" time="2025-07-07T20:14:52-04:00" level=debug msg="Initializing event backend journald" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" time="2025-07-07T20:14:52-04:00" level=info msg="Setting parallel job count to 7" time="2025-07-07T20:14:52-04:00" level=debug msg="Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}" time="2025-07-07T20:14:52-04:00" level=debug msg="Successfully loaded 2 networks" time="2025-07-07T20:14:52-04:00" level=debug msg="Pod using bridge network mode" time="2025-07-07T20:14:52-04:00" level=debug msg="Created cgroup path machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice for parent machine.slice and name libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a" time="2025-07-07T20:14:52-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice" time="2025-07-07T20:14:52-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice" Error: adding pod to state: name "httpd2" is in use: pod already exists time="2025-07-07T20:14:52-04:00" level=debug msg="Shutting down engines" Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125 Jul 07 20:14:53 managed-node2 python3.9[42102]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:55 managed-node2 python3.9[42253]: ansible-ansible.legacy.command Invoked with 
_raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:56 managed-node2 python3.9[42403]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:56 managed-node2 python3.9[42552]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:57 managed-node2 podman[42732]: 2025-07-07 20:14:57.595089727 -0400 EDT m=+0.334374931 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:14:57 managed-node2 python3.9[42895]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:58 managed-node2 python3.9[43046]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:59 managed-node2 python3.9[43195]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:14:59 managed-node2 python3.9[43270]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd3.yml _original_basename=.fnfhf1h4 recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd3.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:59 managed-node2 python3.9[43419]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:14:59 managed-node2 podman[43426]: 2025-07-07 20:14:59.794215832 -0400 EDT m=+0.017981924 network create 
0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:14:59 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice. ░░ Subject: A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully. ░░ ░░ The job identifier is 1763. Jul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:00 managed-node2 sudo[43582]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-copmprovbtozwjdqvrxslhkmftgtigcs ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933700.520565-16475-105270240946276/AnsiballZ_command.py' Jul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:00 managed-node2 python3.9[43584]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:00 managed-node2 systemd[27808]: Started podman-43592.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 146. 
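Besides inspecting the pods, the next entries confirm that each pod's podman-kube@ unit is loaded and active, one in the user manager and two in the system manager. These are the exact pipelines the test runs; the --user variant has to execute as podman_basic_user with XDG_RUNTIME_DIR=/run/user/3001, as the surrounding sudo entries show.

  systemctl --user list-units -a -l --plain \
      | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active '
  systemctl --system list-units -a -l --plain \
      | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '
  systemctl --system list-units -a -l --plain \
      | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active '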
Jul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:01 managed-node2 python3.9[43750]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:01 managed-node2 python3.9[43908]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:02 managed-node2 sudo[44065]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-omhmgepybavqbezpokrriumisrazocox ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933701.8765578-16521-15111276780862/AnsiballZ_command.py' Jul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:02 managed-node2 python3.9[44067]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:02 managed-node2 python3.9[44219]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:02 managed-node2 python3.9[44371]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:03 managed-node2 python3.9[44523]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:03 managed-node2 python3.9[44673]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True 
force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:04 managed-node2 python3.9[44823]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15003/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:07 managed-node2 python3.9[45122]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:08 managed-node2 python3.9[45277]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:11 managed-node2 python3.9[45428]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None Jul 07 20:15:12 managed-node2 python3.9[45578]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:12 managed-node2 python3.9[45729]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:12 managed-node2 python3.9[45879]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:14 managed-node2 python3.9[46029]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:14 managed-node2 python3.9[46179]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:15 managed-node2 sudo[46330]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yrlqohfxmrbjsiwrdleklbogdfrytzax ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 
/var/tmp/ansible-tmp-1751933715.029206-17136-162272482862195/AnsiballZ_systemd.py' Jul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:15 managed-node2 python3.9[46332]: ansible-systemd Invoked with name=podman-kube@-home-podman_basic_user-.config-containers-ansible\x2dkubernetes.d-httpd1.yml.service scope=user state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:15:15 managed-node2 systemd[27808]: Reloading. Jul 07 20:15:15 managed-node2 systemd[27808]: Stopping A template for running K8s workloads via podman-kube-play... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 150. Jul 07 20:15:25 managed-node2 podman[46347]: time="2025-07-07T20:15:25-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd1-httpd1 in 10 seconds, resorting to SIGKILL" Jul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:25 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 151 and the job result is done. Jul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory Jul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory Jul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory Jul 07 20:15:26 managed-node2 podman[46347]: Pods stopped: Jul 07 20:15:26 managed-node2 podman[46347]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4 Jul 07 20:15:26 managed-node2 podman[46347]: Pods removed: Jul 07 20:15:26 managed-node2 podman[46347]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4 Jul 07 20:15:26 managed-node2 podman[46347]: Secrets removed: Jul 07 20:15:26 managed-node2 podman[46347]: Volumes removed: Jul 07 20:15:26 managed-node2 systemd[27808]: Stopped A template for running K8s workloads via podman-kube-play. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 150 and the job result is done. 
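The shutdown above is the role stopping and disabling the per-user kube unit; note the 10-second SIGTERM grace period before podman falls back to SIGKILL on the httpd1 container. A rough manual equivalent, run as podman_basic_user with XDG_RUNTIME_DIR=/run/user/3001 and using the unit name from these entries, is:

  systemctl --user stop 'podman-kube@-home-podman_basic_user-.config-containers-ansible\x2dkubernetes.d-httpd1.yml.service'
  systemctl --user disable 'podman-kube@-home-podman_basic_user-.config-containers-ansible\x2dkubernetes.d-httpd1.yml.service'
  systemctl --user is-active 'podman-kube@-home-podman_basic_user-.config-containers-ansible\x2dkubernetes.d-httpd1.yml.service'   # should now report inactive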
Jul 07 20:15:26 managed-node2 sudo[46330]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:26 managed-node2 python3.9[46572]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:26 managed-node2 sudo[46723]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngscqobpoontmmgbeazhbfnhtlrerjma ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933726.6496177-17450-135897393375501/AnsiballZ_podman_play.py' Jul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play version: 5.5.1, kube file /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml Jul 07 20:15:27 managed-node2 systemd[27808]: Started podman-46732.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 152. 
Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman kube play --down /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped: Pods removed: Secrets removed: Volumes removed: Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0 Jul 07 20:15:27 managed-node2 sudo[46723]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:27 managed-node2 python3.9[46888]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:28 managed-node2 python3.9[47037]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:15:29 managed-node2 python3.9[47187]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:31 managed-node2 python3.9[47338]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:31 managed-node2 python3.9[47488]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:15:31 managed-node2 systemd[1]: Reloading. Jul 07 20:15:31 managed-node2 systemd-rc-local-generator[47509]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:15:32 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play... ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution. ░░ ░░ The job identifier is 1768. Jul 07 20:15:32 managed-node2 podman[47527]: 2025-07-07 20:15:32.086748492 -0400 EDT m=+0.031435423 pod stop d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:15:42 managed-node2 podman[47527]: time="2025-07-07T20:15:42-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL" Jul 07 20:15:42 managed-node2 systemd[1]: libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.121044923 -0400 EDT m=+10.065732151 container died 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test) Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.165231828 -0400 EDT m=+10.109918731 container cleanup 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:15:42 managed-node2 systemd[1]: libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.176672676 -0400 EDT m=+10.121359827 container died 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:42 managed-node2 systemd[1]: run-netns-netns\x2d2e00ab52\x2d0e7b\x2d94ee\x2da345\x2dec17caccc43b.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d2e00ab52\x2d0e7b\x2d94ee\x2da345\x2dec17caccc43b.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.276854989 -0400 EDT m=+10.221541921 container cleanup 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice. ░░ Subject: A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished. ░░ ░░ The job identifier is 1770 and the job result is done. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.302687887 -0400 EDT m=+10.247374820 container remove 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.329086658 -0400 EDT m=+10.273773592 container remove 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 systemd[1]: machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: Failed to open /run/systemd/transient/machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: No such file or directory Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.337213217 -0400 EDT m=+10.281900117 pod remove d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:15:42 managed-node2 systemd[1]: libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has successfully entered the 'dead' state. 
Jul 07 20:15:42 managed-node2 conmon[32226]: conmon 78627b3638a40af7f868 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope/container/memory.events Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.343750156 -0400 EDT m=+10.288437240 container kill 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.349945997 -0400 EDT m=+10.294633088 container died 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.409484257 -0400 EDT m=+10.354171379 container remove 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 podman[47527]: Pods stopped: Jul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 Jul 07 20:15:42 managed-node2 podman[47527]: Pods removed: Jul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 Jul 07 20:15:42 managed-node2 podman[47527]: Secrets removed: Jul 07 20:15:42 managed-node2 podman[47527]: Volumes removed: Jul 07 20:15:42 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play. ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished. ░░ ░░ The job identifier is 1768 and the job result is done. Jul 07 20:15:42 managed-node2 python3.9[47729]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman kube play --down /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped: Pods removed: Secrets removed: Volumes removed: Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0 Jul 07 20:15:43 managed-node2 python3.9[48043]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:45 managed-node2 python3.9[48192]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:46 managed-node2 python3.9[48343]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:47 managed-node2 python3.9[48493]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:15:47 managed-node2 systemd[1]: Reloading. Jul 07 20:15:47 managed-node2 systemd-rc-local-generator[48513]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:15:47 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play... ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution. ░░ ░░ The job identifier is 1771. 
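The long unit names in these entries are not hand-written: as the ansible-ansible.legacy.command entries show, the role derives them by instantiating the podman-kube@.service template from the kube file path with systemd-escape ('/' becomes '-', literal '-' becomes \x2d). A small sketch of that derivation; the register variable name is chosen here for illustration:

- name: Derive the templated unit name from the kube file path (sketch)
  ansible.builtin.command:
    argv:
      - systemd-escape
      - --template
      - podman-kube@.service
      - /etc/containers/ansible-kubernetes.d/httpd3.yml
  register: __escaped_unit
  changed_when: false

# __escaped_unit.stdout is expected to be the unit name seen in the journal:
#   podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service
- name: Stop and disable that unit (sketch)
  ansible.builtin.systemd:
    name: "{{ __escaped_unit.stdout }}"
    scope: system
    state: stopped
    enabled: false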
Jul 07 20:15:47 managed-node2 podman[48533]: 2025-07-07 20:15:47.405787867 -0400 EDT m=+0.031643471 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 podman[48533]: time="2025-07-07T20:15:57-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL" Jul 07 20:15:57 managed-node2 systemd[1]: libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.434010239 -0400 EDT m=+10.059866007 container died 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.478667416 -0400 EDT m=+10.104522986 container cleanup 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 systemd[1]: libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.500190016 -0400 EDT m=+10.126045733 container died e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: run-rce7152e4cf79441b86b3f3ed7d6f4283.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has successfully entered the 'dead' state. 
Jul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left allmulticast mode Jul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left promiscuous mode Jul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:15:57 managed-node2 NetworkManager[644]: [1751933757.5423] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jul 07 20:15:57 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1773. Jul 07 20:15:57 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1773. Jul 07 20:15:57 managed-node2 systemd[1]: run-netns-netns\x2db2b0269b\x2d6f52\x2d704b\x2de0f2\x2d936fd9832ebd.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2db2b0269b\x2d6f52\x2d704b\x2de0f2\x2d936fd9832ebd.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.72272296 -0400 EDT m=+10.348578562 container cleanup e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice. ░░ Subject: A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished. ░░ ░░ The job identifier is 1839 and the job result is done. 
Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.730067956 -0400 EDT m=+10.355923539 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.736214931 -0400 EDT m=+10.362070507 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.760284407 -0400 EDT m=+10.386140034 container remove 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.786340751 -0400 EDT m=+10.412196374 container remove e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.794425826 -0400 EDT m=+10.420281396 pod remove 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 systemd[1]: libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has successfully entered the 'dead' state. 
Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.797955293 -0400 EDT m=+10.423811069 container kill eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.805138661 -0400 EDT m=+10.430994476 container died eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.86757777 -0400 EDT m=+10.493433378 container remove eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: Pods stopped: Jul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 Jul 07 20:15:57 managed-node2 podman[48533]: Pods removed: Jul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 Jul 07 20:15:57 managed-node2 podman[48533]: Secrets removed: Jul 07 20:15:57 managed-node2 podman[48533]: Volumes removed: Jul 07 20:15:57 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play. ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished. ░░ ░░ The job identifier is 1771 and the job result is done. Jul 07 20:15:58 managed-node2 python3.9[48770]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play Invoked with state=absent kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd3.yml Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:15:59 managed-node2 python3.9[49083]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:00 managed-node2 python3.9[49232]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None Jul 07 20:16:00 managed-node2 python3.9[49382]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:01 managed-node2 sudo[49533]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eyaoewgeadnpajdfsqkdmnspilftsmzm ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.058141-18477-143961090014844/AnsiballZ_podman_container_info.py' Jul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:01 managed-node2 python3.9[49535]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None Jul 07 20:16:01 managed-node2 systemd[27808]: Started podman-49536.scope. 
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 156. Jul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:01 managed-node2 sudo[49691]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-csyhukllkdoqxtgiejqztpcafyureeyp ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.7158227-18498-173793406938955/AnsiballZ_command.py' Jul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:01 managed-node2 python3.9[49693]: ansible-ansible.legacy.command Invoked with _raw_params=podman network ls -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49694.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 160. Jul 07 20:16:02 managed-node2 sudo[49691]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:02 managed-node2 sudo[49850]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nugdaehmscgbqoulldhoxbffneulpeqi ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933762.1564903-18516-230631949404561/AnsiballZ_command.py' Jul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:02 managed-node2 python3.9[49852]: ansible-ansible.legacy.command Invoked with _raw_params=podman secret ls -n -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49853.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 164. Jul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:02 managed-node2 python3.9[50009]: ansible-ansible.legacy.command Invoked with removes=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl disable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None stdin=None Jul 07 20:16:02 managed-node2 systemd[1]: Stopping User Manager for UID 3001... ░░ Subject: A stop job for unit user@3001.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user@3001.service has begun execution. ░░ ░░ The job identifier is 1840. 
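Cancelling lingering is what triggers the user@3001.service shutdown that follows: once no session and no linger flag remain, systemd stops the user manager and removes /run/user/3001. A sketch of the guarded task the log reflects, using removes: so the command only runs while the linger flag file still exists:

- name: Cancel linger for the rootless user (sketch)
  ansible.builtin.command: loginctl disable-linger podman_basic_user
  args:
    removes: /var/lib/systemd/linger/podman_basic_user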
Jul 07 20:16:02 managed-node2 systemd[27808]: Activating special unit Exit the Session... Jul 07 20:16:02 managed-node2 systemd[27808]: Stopping podman-pause-7fbe17c5.scope... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 181. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /app/podman-kube. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 183 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 180 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Main User Target. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 174 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Basic System. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 187 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Paths. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 185 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Sockets. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 173 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Timers. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 188 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped Mark boot as successful after the user session has run 2 minutes. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 186 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped Daily Cleanup of User's Temporary Directories. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 172 and the job result is done. Jul 07 20:16:02 managed-node2 dbus-broker[28296]: Dispatched 2118 messages @ 3(±15)μs / message. ░░ Subject: Dispatched 2118 messages ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ This message is printed by dbus-broker when shutting down. 
It includes metric ░░ information collected during the runtime of dbus-broker. ░░ ░░ The message lists the number of dispatched messages ░░ (in this case 2118) as well as the mean time to ░░ handling a single message. The time measurements exclude the time spent on ░░ writing to and reading from the kernel. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopping D-Bus User Message Bus... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 171. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped Create User's Volatile Files and Directories. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 176 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped D-Bus User Message Bus. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 171 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped podman-pause-7fbe17c5.scope. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 181 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /user. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 179 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Closed D-Bus User Message Bus Socket. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 175 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice User Application Slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 184 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Reached target Shutdown. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 170. Jul 07 20:16:02 managed-node2 systemd[27808]: Finished Exit the Session. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 169. Jul 07 20:16:02 managed-node2 systemd[27808]: Reached target Exit the Session. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 168. Jul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user@3001.service has successfully entered the 'dead' state. Jul 07 20:16:02 managed-node2 systemd[1]: Stopped User Manager for UID 3001. ░░ Subject: A stop job for unit user@3001.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user@3001.service has finished. ░░ ░░ The job identifier is 1840 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Consumed 2.173s CPU time. ░░ Subject: Resources consumed by unit runtime ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user@3001.service completed and consumed the indicated resources. Jul 07 20:16:02 managed-node2 systemd[1]: Stopping User Runtime Directory /run/user/3001... ░░ Subject: A stop job for unit user-runtime-dir@3001.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user-runtime-dir@3001.service has begun execution. ░░ ░░ The job identifier is 1841. Jul 07 20:16:03 managed-node2 systemd[1]: run-user-3001.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-user-3001.mount has successfully entered the 'dead' state. Jul 07 20:16:03 managed-node2 systemd[1]: user-runtime-dir@3001.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user-runtime-dir@3001.service has successfully entered the 'dead' state. Jul 07 20:16:03 managed-node2 systemd[1]: Stopped User Runtime Directory /run/user/3001. ░░ Subject: A stop job for unit user-runtime-dir@3001.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user-runtime-dir@3001.service has finished. ░░ ░░ The job identifier is 1841 and the job result is done. Jul 07 20:16:03 managed-node2 systemd[1]: Removed slice User Slice of UID 3001. ░░ Subject: A stop job for unit user-3001.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user-3001.slice has finished. ░░ ░░ The job identifier is 1843 and the job result is done. Jul 07 20:16:03 managed-node2 systemd[1]: user-3001.slice: Consumed 2.196s CPU time. ░░ Subject: Resources consumed by unit runtime ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user-3001.slice completed and consumed the indicated resources. 
Jul 07 20:16:03 managed-node2 python3.9[50161]: ansible-ansible.legacy.command Invoked with _raw_params=loginctl show-user --value -p State podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:04 managed-node2 sudo[50311]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dpicetofkbhrscpezuhafexsvxxmiwru ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933764.0345392-18589-160568711821160/AnsiballZ_command.py' Jul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:04 managed-node2 python3.9[50313]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd1 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:04 managed-node2 python3.9[50468]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:04 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:05 managed-node2 python3.9[50624]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd3 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:05 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
Jul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:05 managed-node2 sudo[50781]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ulqlnfrznxyujwhsbktkenenjnaaarpn ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933765.2955933-18640-73245982266512/AnsiballZ_command.py' Jul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:05 managed-node2 python3.9[50783]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:05 managed-node2 python3.9[50935]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:06 managed-node2 python3.9[51087]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:06 managed-node2 python3.9[51239]: ansible-stat Invoked with path=/var/lib/systemd/linger/podman_basic_user follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:07 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. 
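These entries are the cleanup assertions: podman pod exists returns 0 only when the named pod is present, and the list-units | grep pipelines only match a podman-kube@ unit that is still loaded and active, so after teardown both checks should come back non-zero. A hedged sketch of such a check; the failed_when logic is an assumption about the test's intent, not copied from it:

- name: Verify the httpd2 pod is gone (sketch)
  ansible.builtin.command: podman pod exists httpd2
  register: __pod_check
  changed_when: false
  failed_when: __pod_check.rc == 0   # rc 0 would mean the pod still exists

- name: Verify no podman-kube@ unit for httpd2 is still active (sketch)
  ansible.builtin.shell: |
    set -euo pipefail
    systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '
  register: __unit_check
  changed_when: false
  failed_when: __unit_check.rc == 0  # a match would mean the unit is still active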
Jul 07 20:16:09 managed-node2 python3.9[51537]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:10 managed-node2 python3.9[51692]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:16:10 managed-node2 python3.9[51842]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:14 managed-node2 python3.9[51993]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None Jul 07 20:16:14 managed-node2 python3.9[52143]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:15 managed-node2 python3.9[52294]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:15 managed-node2 python3.9[52444]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:17 managed-node2 python3.9[52594]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:17 managed-node2 python3.9[52744]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:17 managed-node2 python3.9[52893]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:18 managed-node2 python3.9[53042]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:19 managed-node2 python3.9[53191]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:16:20 managed-node2 python3.9[53341]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:21 managed-node2 python3.9[53492]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:21 
managed-node2 python3.9[53642]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:16:22 managed-node2 python3.9[53793]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:22 managed-node2 python3.9[53942]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:23 managed-node2 python3.9[54091]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:25 managed-node2 python3.9[54242]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:25 managed-node2 python3.9[54392]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:16:26 managed-node2 python3.9[54543]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:26 managed-node2 python3.9[54692]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:27 managed-node2 python3.9[54841]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None Jul 07 20:16:28 managed-node2 python3.9[54991]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:29 managed-node2 python3.9[55140]: ansible-file Invoked with path=/etc/containers/storage.conf state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:30 managed-node2 python3.9[55289]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None 
group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:33 managed-node2 python3.9[55487]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Jul 07 20:16:34 managed-node2 python3.9[55662]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:34 managed-node2 python3.9[55811]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:36 managed-node2 python3.9[56109]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:37 managed-node2 python3.9[56264]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:16:37 managed-node2 python3.9[56414]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:40 managed-node2 python3.9[56565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:41 managed-node2 python3.9[56716]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:41 managed-node2 python3.9[56865]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:16:42 managed-node2 python3.9[56985]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933801.5835004-19965-109711770661066/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:42 managed-node2 python3.9[57134]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:16:42 managed-node2 systemd[1]: Reloading. Jul 07 20:16:43 managed-node2 systemd-rc-local-generator[57151]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:16:43 managed-node2 python3.9[57317]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:16:43 managed-node2 systemd[1]: Starting quadlet-pod-pod-pod.service... 
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 1845. Jul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay-metacopy\x2dcheck327374229-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-metacopy\x2dcheck327374229-merged.mount has successfully entered the 'dead' state. Jul 07 20:16:43 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice. ░░ Subject: A start job for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully. ░░ ░░ The job identifier is 1916. Jul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.753251921 -0400 EDT m=+0.075359120 container create 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.760005549 -0400 EDT m=+0.082112720 pod create e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod) Jul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57321]: e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 Jul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8066] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/9) Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered disabled state Jul 07 20:16:43 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:16:43 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered forwarding state Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8206] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/10) Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8221] device (veth0): carrier: link connected Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8226] device (podman0): carrier: link connected Jul 07 20:16:43 managed-node2 systemd-udevd[57347]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:16:43 managed-node2 systemd-udevd[57348]: Network interface NamePolicy= disabled on kernel command line. 
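The cleanup entries earlier in this journal excerpt (20:16:17 through 20:16:26) use systemd-escape --template to turn each kube yaml path into the matching podman-kube@ instance name, for example podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, before stopping that instance and removing the yaml file. A minimal sketch of that pattern as standalone Ansible tasks (the __kube_unit register name is illustrative, not taken from the role):

- name: Compute the podman-kube@ instance name for a kube yaml path
  ansible.builtin.command:
    cmd: systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml
  register: __kube_unit
  changed_when: false

- name: Stop and disable that instance before deleting the yaml file
  ansible.builtin.systemd:
    name: "{{ __kube_unit.stdout }}"
    scope: system
    state: stopped
    enabled: false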
Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8651] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8658] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8668] device (podman0): Activation: starting connection 'podman0' (0dc63386-fc14-4ac2-8cee-25b24d1739b5) Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8670] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8673] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8676] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8679] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1921. Jul 07 20:16:43 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1921. Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8930] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8932] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8938] device (podman0): Activation: successful, device activated. Jul 07 20:16:43 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully. ░░ ░░ The job identifier is 1987. 
Jul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.975146229 -0400 EDT m=+0.200627141 container init 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.981266459 -0400 EDT m=+0.206747252 container start 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.987430031 -0400 EDT m=+0.212910758 pod start e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod) Jul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57329]: quadlet-pod Jul 07 20:16:43 managed-node2 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 1845. Jul 07 20:16:44 managed-node2 python3.9[57565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:46 managed-node2 podman[57748]: 2025-07-07 20:16:46.651148299 -0400 EDT m=+0.387653928 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:16:47 managed-node2 python3.9[57912]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:47 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
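The root-scope deployment at 20:16:41 through 20:16:43 above follows the quadlet workflow: write the unit into /etc/containers/systemd, reload systemd so the quadlet generator runs, then start the generated quadlet-pod-pod-pod.service. A sketch of the same sequence as plain Ansible tasks; the [Pod] body is an assumption for illustration, since the log records only the file's checksum:

- name: Install the pod quadlet unit (root scope)
  ansible.builtin.copy:
    dest: /etc/containers/systemd/quadlet-pod-pod.pod
    owner: root
    group: "0"
    mode: "0644"
    content: |
      # assumed minimal body; quadlet maps quadlet-pod-pod.pod to quadlet-pod-pod-pod.service
      [Pod]
      PodName=quadlet-pod

- name: Reload systemd so the quadlet generator picks up the new unit
  ansible.builtin.systemd:
    daemon_reload: true
    scope: system

- name: Start the generated pod service
  ansible.builtin.systemd:
    name: quadlet-pod-pod-pod.service
    scope: system
    state: started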
Jul 07 20:16:47 managed-node2 python3.9[58061]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:16:47 managed-node2 python3.9[58181]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933807.2449324-20069-121688553430320/.source.container dest=/etc/containers/systemd/quadlet-pod-container.container owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:48 managed-node2 python3.9[58330]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:16:48 managed-node2 systemd[1]: Reloading. Jul 07 20:16:48 managed-node2 systemd-rc-local-generator[58347]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:16:48 managed-node2 python3.9[58513]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:16:49 managed-node2 systemd[1]: Starting quadlet-pod-container.service... ░░ Subject: A start job for unit quadlet-pod-container.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-container.service has begun execution. ░░ ░░ The job identifier is 1992. Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.074860568 -0400 EDT m=+0.046591683 container create b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z) Jul 07 20:16:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-volatile\x2dcheck976746358-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-volatile\x2dcheck976746358-merged.mount has successfully entered the 'dead' state. Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.117361038 -0400 EDT m=+0.089092248 container init b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service) Jul 07 20:16:49 managed-node2 systemd[1]: Started quadlet-pod-container.service. ░░ Subject: A start job for unit quadlet-pod-container.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-container.service has finished successfully. ░░ ░░ The job identifier is 1992. 
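The container side at 20:16:47 through 20:16:49 repeats the same copy, daemon-reload, and start steps for quadlet-pod-container.container. The Pod= reference is what places the container inside the pod created above, and it is the feature gated on podman 5.0 later in this run. The body below is a plausible reconstruction based on the container create events, not the actual file (only its checksum is logged):

- name: Install the container quadlet unit (root scope)
  ansible.builtin.copy:
    dest: /etc/containers/systemd/quadlet-pod-container.container
    owner: root
    group: "0"
    mode: "0644"
    content: |
      # assumed body; the image and container name come from the podman events above
      [Container]
      ContainerName=quadlet-pod-container
      Image=quay.io/libpod/testimage:20210610
      Pod=quadlet-pod-pod.pod

The same daemon_reload and service start steps shown earlier then apply to the generated quadlet-pod-container.service.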
Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.122201448 -0400 EDT m=+0.093932749 container start b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:16:49 managed-node2 quadlet-pod-container[58517]: b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.05249592 -0400 EDT m=+0.024227291 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:16:49 managed-node2 python3.9[58679]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:49 managed-node2 python3.9[58829]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:50 managed-node2 python3.9[58979]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}} {{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:50 managed-node2 python3.9[59137]: ansible-user Invoked with name=user_quadlet_pod uid=2223 state=present non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on managed-node2 update_password=always group=None groups=None comment=None home=None shell=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None Jul 07 20:16:50 managed-node2 useradd[59139]: new group: name=user_quadlet_pod, GID=2223 Jul 07 20:16:50 managed-node2 useradd[59139]: new user: name=user_quadlet_pod, UID=2223, GID=2223, home=/home/user_quadlet_pod, shell=/bin/bash, from=/dev/pts/0 Jul 07 20:16:50 managed-node2 rsyslogd[812]: imjournal: journal files changed, reloading... 
[v8.2412.0-2.el9 try https://www.rsyslog.com/e/0 ] Jul 07 20:16:52 managed-node2 python3.9[59444]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:53 managed-node2 python3.9[59599]: ansible-getent Invoked with database=passwd key=user_quadlet_pod fail_key=False service=None split=None Jul 07 20:16:53 managed-node2 python3.9[59749]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:53 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jul 07 20:16:54 managed-node2 python3.9[59900]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:54 managed-node2 python3.9[60050]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:56 managed-node2 python3.9[60200]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:57 managed-node2 python3.9[60351]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:57 managed-node2 python3.9[60501]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:58 managed-node2 python3.9[60651]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None Jul 07 20:16:58 managed-node2 systemd[1]: Created slice User Slice of UID 2223. ░░ Subject: A start job for unit user-2223.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-2223.slice has finished successfully. ░░ ░░ The job identifier is 2130. Jul 07 20:16:58 managed-node2 systemd[1]: Starting User Runtime Directory /run/user/2223... ░░ Subject: A start job for unit user-runtime-dir@2223.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@2223.service has begun execution. ░░ ░░ The job identifier is 2065. Jul 07 20:16:58 managed-node2 systemd[1]: Finished User Runtime Directory /run/user/2223. 
░░ Subject: A start job for unit user-runtime-dir@2223.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@2223.service has finished successfully. ░░ ░░ The job identifier is 2065. Jul 07 20:16:58 managed-node2 systemd[1]: Starting User Manager for UID 2223... ░░ Subject: A start job for unit user@2223.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@2223.service has begun execution. ░░ ░░ The job identifier is 2064. Jul 07 20:16:58 managed-node2 systemd[60658]: pam_unix(systemd-user:session): session opened for user user_quadlet_pod(uid=2223) by user_quadlet_pod(uid=0) Jul 07 20:16:58 managed-node2 systemd[60658]: Queued start job for default target Main User Target. Jul 07 20:16:58 managed-node2 systemd[60658]: Created slice User Application Slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 4. Jul 07 20:16:58 managed-node2 systemd[60658]: Started Mark boot as successful after the user session has run 2 minutes. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 8. Jul 07 20:16:58 managed-node2 systemd[60658]: Started Daily Cleanup of User's Temporary Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 9. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Paths. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 12. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Timers. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 7. Jul 07 20:16:58 managed-node2 systemd[60658]: Starting D-Bus User Message Bus Socket... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 11. Jul 07 20:16:58 managed-node2 systemd[60658]: Starting Create User's Volatile Files and Directories... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 3. Jul 07 20:16:58 managed-node2 systemd[60658]: Listening on D-Bus User Message Bus Socket. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 11. Jul 07 20:16:58 managed-node2 systemd[60658]: Finished Create User's Volatile Files and Directories. 
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 3. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Sockets. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 10. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Basic System. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 2. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Main User Target. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 1. Jul 07 20:16:58 managed-node2 systemd[60658]: Startup finished in 65ms. ░░ Subject: User manager start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The user manager instance for user 2223 has been started. All services queued ░░ for starting have been started. Note that other services might still be starting ░░ up or be started at any later time. ░░ ░░ Startup of the manager took 65603 microseconds. Jul 07 20:16:58 managed-node2 systemd[1]: Started User Manager for UID 2223. ░░ Subject: A start job for unit user@2223.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@2223.service has finished successfully. ░░ ░░ The job identifier is 2064. 
Jul 07 20:16:59 managed-node2 python3.9[60817]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:59 managed-node2 python3.9[60966]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:17:00 managed-node2 python3.9[61086]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933819.4453251-20404-152886448732131/.source.pod dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:00 managed-node2 sudo[61235]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qycimnmrylvnpkxuzbdovgpddpoemvav ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.1430469-20434-39126838474860/AnsiballZ_systemd.py' Jul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd [WARNING] Module remote_tmp /home/user_quadlet_pod/.ansible/tmp did not exist and was created with a mode of 0700, this may cause issues when running as another user. To avoid this, create the remote_tmp dir with the correct permissions manually Jul 07 20:17:00 managed-node2 systemd[60658]: Reloading. Jul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:00 managed-node2 sudo[61397]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nnnqmwerpaczbueaimzsjafnuajzqvhs ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.7074661-20450-27497593035399/AnsiballZ_systemd.py' Jul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:01 managed-node2 python3.9[61399]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:17:01 managed-node2 systemd[60658]: Starting Wait for system level network-online.target as user.... 
░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 25. Jul 07 20:17:01 managed-node2 sh[61403]: active Jul 07 20:17:01 managed-node2 systemd[60658]: Finished Wait for system level network-online.target as user.. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 25. Jul 07 20:17:01 managed-node2 systemd[60658]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 13. Jul 07 20:17:01 managed-node2 systemd[60658]: Starting D-Bus User Message Bus... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 26. Jul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored Jul 07 20:17:01 managed-node2 systemd[60658]: Started D-Bus User Message Bus. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 26. Jul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored Jul 07 20:17:01 managed-node2 dbus-broker-lau[61428]: Ready Jul 07 20:17:01 managed-node2 systemd[60658]: Created slice Slice /user. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 32. Jul 07 20:17:01 managed-node2 systemd[60658]: Created slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 31. Jul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61411]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658 Jul 07 20:17:01 managed-node2 systemd[60658]: podman-pause-d252ab55.scope: unit configures an IP firewall, but not running as root. Jul 07 20:17:01 managed-node2 systemd[60658]: (This warning is only shown for the first unit using IP firewalling.) Jul 07 20:17:01 managed-node2 systemd[60658]: Started podman-pause-d252ab55.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 35. Jul 07 20:17:01 managed-node2 systemd[60658]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 39. 
Jul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61431]: quadlet-pod Jul 07 20:17:01 managed-node2 systemd[60658]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 13. Jul 07 20:17:01 managed-node2 sudo[61397]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:02 managed-node2 python3.9[61604]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:02 managed-node2 python3.9[61755]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:02 managed-node2 python3.9[61905]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:04 managed-node2 python3.9[62055]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None Jul 07 20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:04 managed-node2 sudo[62204]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-clqautntenvhsnblukvtrjsozolrrutk ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933824.2704933-20565-153115325770627/AnsiballZ_podman_image.py' Jul 07 20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62207.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 44. Jul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62215.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 48. Jul 07 20:17:05 managed-node2 systemd[60658]: Started podman-62241.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 52. 
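For the user scope, the quadlet file goes under /home/user_quadlet_pod/.config/containers/systemd and every systemd operation runs as that user with XDG_RUNTIME_DIR set, as the sudo command lines above show. A sketch of the start step; hard-coding /run/user/2223 mirrors this run, though a general playbook would derive the path from the user's uid:

- name: Start the user-scope pod service as user_quadlet_pod
  ansible.builtin.systemd:
    name: quadlet-pod-pod-pod.service
    scope: user
    state: started
  become: true
  become_user: user_quadlet_pod
  environment:
    # required so systemctl --user can reach the per-user manager over D-Bus
    XDG_RUNTIME_DIR: /run/user/2223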
Jul 07 20:17:05 managed-node2 sudo[62204]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:05 managed-node2 python3.9[62397]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:17:06 managed-node2 python3.9[62546]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:17:06 managed-node2 python3.9[62666]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933826.0010831-20615-171147992892008/.source.container dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:06 managed-node2 sudo[62815]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvdbdvervsgjztpuoyolguwsbeqeprfj ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933826.7366147-20636-253031789963643/AnsiballZ_systemd.py' Jul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:07 managed-node2 python3.9[62817]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:17:07 managed-node2 systemd[60658]: Reloading. Jul 07 20:17:07 managed-node2 sudo[62815]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:07 managed-node2 sudo[62977]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uowxubkhbahtejstbxtpvjntqjsyenah ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933827.3300688-20652-5834244785399/AnsiballZ_systemd.py' Jul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:07 managed-node2 python3.9[62979]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:17:07 managed-node2 systemd[60658]: Starting quadlet-pod-container.service... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 56. Jul 07 20:17:07 managed-node2 systemd[60658]: Started quadlet-pod-container.service. 
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 56. Jul 07 20:17:07 managed-node2 quadlet-pod-container[62982]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6 Jul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:08 managed-node2 python3.9[63145]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:08 managed-node2 python3.9[63295]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:09 managed-node2 sudo[63445]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-smtrjtflbvxbkabzbxqvalutkdqpmdkb ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933828.9926739-20712-141410485382073/AnsiballZ_command.py' Jul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:09 managed-node2 python3.9[63447]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}} {{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:09 managed-node2 systemd[60658]: Started podman-63448.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 70. 
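The podman pod inspect commands, run once as root (20:16:50) and once as the rootless user (20:17:09), confirm that quadlet-pod-container is attached to the quadlet-pod pod. Reproducing that check in a playbook means keeping Jinja away from the Go template braces; one way, sketched here with an illustrative register name, is the !unsafe tag:

- name: List the containers attached to the quadlet-pod pod
  ansible.builtin.command:
    # !unsafe keeps the Go template braces from being treated as Jinja
    cmd: !unsafe "podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}} {{end}}'"
  register: __pod_containers
  changed_when: false

- name: Fail if the expected container is not in the pod
  ansible.builtin.assert:
    that:
      - "'quadlet-pod-container' in __pod_containers.stdout"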
Jul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:09 managed-node2 python3.9[63604]: ansible-stat Invoked with path=/var/lib/systemd/linger/user_quadlet_pod follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:11 managed-node2 python3.9[63904]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:12 managed-node2 python3.9[64059]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:13 managed-node2 python3.9[64210]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:13 managed-node2 python3.9[64360]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:15 managed-node2 python3.9[64510]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:16 managed-node2 python3.9[64661]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:16 managed-node2 python3.9[64811]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:17 managed-node2 python3.9[64961]: ansible-stat Invoked with path=/run/user/2223 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:18 managed-node2 sudo[65112]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mdrwdexxoegkvbfvoaccgnpcnrcnmlbz ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933838.0379803-20995-129290334053056/AnsiballZ_systemd.py' Jul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:18 managed-node2 python3.9[65114]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None Jul 07 20:17:18 managed-node2 systemd[60658]: Reloading. Jul 07 20:17:18 managed-node2 systemd[60658]: Stopping quadlet-pod-container.service... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 74. 
Jul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: time="2025-07-07T20:17:28-04:00" level=warning msg="StopSignal SIGTERM failed to stop container quadlet-pod-container in 10 seconds, resorting to SIGKILL" Jul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6 Jul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Main process exited, code=exited, status=137/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit UNIT has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 137. Jul 07 20:17:28 managed-node2 systemd[60658]: Removed slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 75 and the job result is done. Jul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory Jul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit UNIT has entered the 'failed' state with result 'exit-code'. Jul 07 20:17:28 managed-node2 systemd[60658]: Stopped quadlet-pod-container.service. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 74 and the job result is done. 
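The teardown at 20:17:18 stops and disables the user-scope container service; the container then ignores SIGTERM and is killed after podman's default 10-second stop timeout, which is why the unit briefly reports status=137 (128 + 9, SIGKILL) and 'failed' before settling into the stopped state. The equivalent stop task, with parameters taken from the logged invocation:

- name: Stop and disable the user-scope container service
  ansible.builtin.systemd:
    name: quadlet-pod-container.service
    scope: user
    state: stopped
    enabled: false
    force: true  # matches force=True in the logged module call
  become: true
  become_user: user_quadlet_pod
  environment:
    XDG_RUNTIME_DIR: /run/user/2223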
Jul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory Jul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65162]: quadlet-pod Jul 07 20:17:28 managed-node2 sudo[65112]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory Jul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65176]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658 Jul 07 20:17:29 managed-node2 python3.9[65334]: ansible-stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:30 managed-node2 python3.9[65634]: ansible-ansible.legacy.command Invoked with _raw_params=set -x set -o pipefail exec 1>&2 #podman volume rm --all #podman network prune -f podman volume ls podman network ls podman secret ls podman container ls podman pod ls podman images systemctl list-units | grep quadlet systemctl list-unit-files | grep quadlet ls -alrtF /etc/containers/systemd /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:30 managed-node2 python3.9[65839]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:31 managed-node2 python3.9[65989]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None TASK [Cleanup user] ************************************************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:159 Monday 07 July 2025 20:17:31 -0400 (0:00:00.429) 0:00:59.201 *********** included: fedora.linux_system_roles.podman for managed-node2 TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3 Monday 07 July 2025 20:17:31 -0400 (0:00:00.077) 0:00:59.278 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3 Monday 07 July 2025 20:17:31 -0400 (0:00:00.053) 0:00:59.332 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0", "skip_reason": "Conditional result was False" } TASK 
[fedora.linux_system_roles.podman : Check if system is ostree] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11 Monday 07 July 2025 20:17:31 -0400 (0:00:00.038) 0:00:59.370 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag to indicate system is ostree] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16 Monday 07 July 2025 20:17:31 -0400 (0:00:00.030) 0:00:59.400 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23 Monday 07 July 2025 20:17:31 -0400 (0:00:00.031) 0:00:59.432 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28 Monday 07 July 2025 20:17:31 -0400 (0:00:00.030) 0:00:59.463 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32 Monday 07 July 2025 20:17:31 -0400 (0:00:00.077) 0:00:59.540 *********** ok: [managed-node2] => (item=RedHat.yml) => { "ansible_facts": { "__podman_packages": [ "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml" ], "ansible_loop_var": "item", "changed": false, "item": "RedHat.yml" } skipping: [managed-node2] => (item=CentOS.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } skipping: [managed-node2] => (item=CentOS_9.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS_9.yml", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Gather the package facts] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6 Monday 07 July 2025 20:17:31 -0400 (0:00:00.062) 0:00:59.603 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Enable copr if requested] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10 Monday 
07 July 2025 20:17:32 -0400 (0:00:00.788) 0:01:00.391 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_use_copr | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14 Monday 07 July 2025 20:17:32 -0400 (0:00:00.031) 0:01:00.423 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "(__podman_packages | difference(ansible_facts.packages)) | list | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28 Monday 07 July 2025 20:17:32 -0400 (0:00:00.037) 0:01:00.460 *********** skipping: [managed-node2] => { "false_condition": "__podman_is_transactional | d(false)" } TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33 Monday 07 July 2025 20:17:32 -0400 (0:00:00.031) 0:01:00.492 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38 Monday 07 July 2025 20:17:32 -0400 (0:00:00.030) 0:01:00.523 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get podman version] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46 Monday 07 July 2025 20:17:32 -0400 (0:00:00.032) 0:01:00.555 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "podman", "--version" ], "delta": "0:00:00.026748", "end": "2025-07-07 20:17:33.134187", "rc": 0, "start": "2025-07-07 20:17:33.107439" } STDOUT: podman version 5.5.1 TASK [fedora.linux_system_roles.podman : Set podman version] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52 Monday 07 July 2025 20:17:33 -0400 (0:00:00.380) 0:01:00.935 *********** ok: [managed-node2] => { "ansible_facts": { "podman_version": "5.5.1" }, "changed": false } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56 Monday 07 July 2025 20:17:33 -0400 (0:00:00.033) 0:01:00.969 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.2\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63 Monday 07 July 2025 20:17:33 -0400 (0:00:00.034) 0:01:01.003 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"4.4\", 
\"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73 Monday 07 July 2025 20:17:33 -0400 (0:00:00.039) 0:01:01.042 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80 Monday 07 July 2025 20:17:33 -0400 (0:00:00.038) 0:01:01.081 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_version is version(\"5.0\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96 Monday 07 July 2025 20:17:33 -0400 (0:00:00.053) 0:01:01.135 *********** META: end_host conditional evaluated to False, continuing execution for managed-node2 skipping: [managed-node2] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node2" } MSG: end_host conditional evaluated to false, continuing execution for managed-node2 TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109 Monday 07 July 2025 20:17:33 -0400 (0:00:00.055) 0:01:01.191 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:17:33 -0400 (0:00:00.062) 0:01:01.253 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:17:33 -0400 (0:00:00.128) 0:01:01.382 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:17:33 -0400 (0:00:00.036) 0:01:01.418 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:17:33 -0400 (0:00:00.042) 0:01:01.461 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:17:34 -0400 (0:00:00.367) 0:01:01.828 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "user_quadlet_pod" ], "delta": "0:00:00.003734", "end": "2025-07-07 20:17:34.390557", "rc": 0, "start": "2025-07-07 20:17:34.386823" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:17:34 -0400 (0:00:00.361) 0:01:02.190 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "-g", "user_quadlet_pod" ], "delta": "0:00:00.005717", "end": "2025-07-07 20:17:34.754499", "rc": 0, "start": "2025-07-07 20:17:34.748782" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:17:34 -0400 (0:00:00.365) 0:01:02.555 *********** ok: [managed-node2] => { "ansible_facts": { "podman_subgid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } }, "podman_subuid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } } }, "changed": false } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:17:34 -0400 (0:00:00.048) 0:01:02.604 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:17:34 -0400 (0:00:00.035) 0:01:02.639 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid 
info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:17:34 -0400 (0:00:00.034) 0:01:02.674 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:17:34 -0400 (0:00:00.033) 0:01:02.708 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:17:35 -0400 (0:00:00.034) 0:01:02.742 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set config file paths] **************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:115 Monday 07 July 2025 20:17:35 -0400 (0:00:00.034) 0:01:02.776 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_container_conf_file": "/home/user_quadlet_pod/.config/containers/containers.conf.d/50-systemroles.conf", "__podman_parent_mode": "0700", "__podman_parent_path": "/home/user_quadlet_pod/.config/containers", "__podman_policy_json_file": "/home/user_quadlet_pod/.config/containers/policy.json", "__podman_registries_conf_file": "/home/user_quadlet_pod/.config/containers/registries.conf.d/50-systemroles.conf", "__podman_storage_conf_file": "/home/user_quadlet_pod/.config/containers/storage.conf" }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle container.conf.d] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:126 Monday 07 July 2025 20:17:35 -0400 (0:00:00.043) 0:01:02.820 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure containers.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:5 Monday 07 July 2025 20:17:35 -0400 (0:00:00.060) 0:01:02.880 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update container config file] ********* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:13 Monday 07 July 2025 20:17:35 -0400 (0:00:00.033) 0:01:02.914 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle registries.conf.d] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:129 Monday 07 
July 2025 20:17:35 -0400 (0:00:00.032) 0:01:02.946 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure registries.d exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:5 Monday 07 July 2025 20:17:35 -0400 (0:00:00.066) 0:01:03.012 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update registries config file] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:13 Monday 07 July 2025 20:17:35 -0400 (0:00:00.037) 0:01:03.050 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle storage.conf] ****************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:132 Monday 07 July 2025 20:17:35 -0400 (0:00:00.030) 0:01:03.081 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure storage.conf parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:7 Monday 07 July 2025 20:17:35 -0400 (0:00:00.107) 0:01:03.189 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update storage config file] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:15 Monday 07 July 2025 20:17:35 -0400 (0:00:00.033) 0:01:03.223 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle policy.json] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:135 Monday 07 July 2025 20:17:35 -0400 (0:00:00.032) 0:01:03.255 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Ensure policy.json parent dir exists] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:8 Monday 07 July 2025 20:17:35 -0400 (0:00:00.065) 0:01:03.321 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Stat the policy.json file] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:16 Monday 07 July 2025 20:17:35 -0400 (0:00:00.032) 0:01:03.353 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", 
"skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get the existing policy.json] ********* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:21 Monday 07 July 2025 20:17:35 -0400 (0:00:00.033) 0:01:03.387 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Write new policy.json file] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:27 Monday 07 July 2025 20:17:35 -0400 (0:00:00.032) 0:01:03.419 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage firewall for specified ports] ************************************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:141 Monday 07 July 2025 20:17:35 -0400 (0:00:00.033) 0:01:03.452 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_firewall | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage selinux for specified ports] ************************************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:148 Monday 07 July 2025 20:17:35 -0400 (0:00:00.031) 0:01:03.484 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "podman_selinux_ports | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Keep track of users that need to cancel linger] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:155 Monday 07 July 2025 20:17:35 -0400 (0:00:00.033) 0:01:03.517 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_cancel_user_linger": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle certs.d files - present] ******* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:159 Monday 07 July 2025 20:17:35 -0400 (0:00:00.032) 0:01:03.550 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle credential files - present] **** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:168 Monday 07 July 2025 20:17:35 -0400 (0:00:00.030) 0:01:03.580 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle secrets] *********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:177 Monday 07 July 2025 20:17:35 -0400 (0:00:00.028) 0:01:03.609 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Kubernetes specifications] ***** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:184 Monday 07 July 2025 20:17:35 -0400 (0:00:00.029) 0:01:03.638 *********** skipping: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Handle Quadlet specifications] ******** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:191 Monday 07 July 2025 20:17:35 -0400 (0:00:00.029) 0:01:03.667 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node2 => (item=(censored due to no_log)) TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14 Monday 07 July 2025 20:17:36 -0400 (0:00:00.094) 0:01:03.761 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_file_src": "", "__podman_quadlet_spec": { "Container": { "ContainerName": "quadlet-pod-container", "Exec": "/bin/busybox-extras httpd -f -p 80", "Image": "quay.io/libpod/testimage:20210610", "Pod": "quadlet-pod-pod.pod" }, "Install": { "WantedBy": "default.target" } }, "__podman_quadlet_str": "", "__podman_quadlet_template_src": "" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25 Monday 07 July 2025 20:17:36 -0400 (0:00:00.044) 0:01:03.806 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_continue_if_pull_fails": false, "__podman_pull_image": true, "__podman_state": "absent", "__podman_systemd_unit_scope": "", "__podman_user": "user_quadlet_pod" }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35 Monday 07 July 2025 20:17:36 -0400 (0:00:00.040) 0:01:03.846 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_quadlet_spec | length == 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48 Monday 07 July 2025 20:17:36 -0400 (0:00:00.081) 0:01:03.928 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_quadlet_name": "quadlet-pod-container", "__podman_quadlet_type": "container", "__podman_rootless": true }, "changed": false } TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57 Monday 07 July 2025 20:17:36 -0400 (0:00:00.048) 0:01:03.976 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Monday 07 July 2025 20:17:36 -0400 (0:00:00.066) 0:01:04.042 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Monday 07 July 2025 20:17:36 -0400 (0:00:00.036) 0:01:04.079 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Monday 07 July 2025 20:17:36 -0400 (0:00:00.036) 0:01:04.116 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_group": "2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:31 Monday 07 July 2025 20:17:36 -0400 (0:00:00.046) 0:01:04.162 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933455.3375134, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "8bedde2dbca15219e1a3b95a68a8c0d26a92ba62", "ctime": 1751933428.1541803, "dev": 51713, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 2118, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1748273472.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15496, "uid": 0, "version": "2386316427", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:42 Monday 07 July 2025 20:17:36 -0400 (0:00:00.367) 0:01:04.530 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "user_quadlet_pod" ], "delta": "0:00:00.003569", "end": "2025-07-07 20:17:37.089125", "rc": 0, "start": "2025-07-07 20:17:37.085556" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:47 Monday 07 July 2025 20:17:37 -0400 (0:00:00.357) 0:01:04.887 *********** ok: [managed-node2] => { "changed": false, "cmd": [ "getsubids", "-g", "user_quadlet_pod" ], "delta": "0:00:00.005621", "end": "2025-07-07 20:17:37.452486", "rc": 0, "start": "2025-07-07 20:17:37.446865" } STDOUT: 0: user_quadlet_pod 165536 65536 TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: 
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:52 Monday 07 July 2025 20:17:37 -0400 (0:00:00.366) 0:01:05.254 *********** ok: [managed-node2] => { "ansible_facts": { "podman_subgid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } }, "podman_subuid_info": { "user_quadlet_pod": { "range": 65536, "start": 165536 } } }, "changed": false } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:65 Monday 07 July 2025 20:17:37 -0400 (0:00:00.047) 0:01:05.301 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:70 Monday 07 July 2025 20:17:37 -0400 (0:00:00.034) 0:01:05.336 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:75 Monday 07 July 2025 20:17:37 -0400 (0:00:00.032) 0:01:05.369 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:85 Monday 07 July 2025 20:17:37 -0400 (0:00:00.033) 0:01:05.402 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:92 Monday 07 July 2025 20:17:37 -0400 (0:00:00.032) 0:01:05.435 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62 Monday 07 July 2025 20:17:37 -0400 (0:00:00.033) 0:01:05.468 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_activate_systemd_unit": true, "__podman_images_found": [ "quay.io/libpod/testimage:20210610" ], "__podman_kube_yamls_raw": "", "__podman_service_name": "quadlet-pod-container.service", "__podman_systemd_scope": "user", "__podman_user_home_dir": "/home/user_quadlet_pod", "__podman_xdg_runtime_dir": "/run/user/2223" }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73 Monday 07 July 2025 20:17:37 -0400 (0:00:00.053) 0:01:05.521 *********** ok: 
[managed-node2] => { "ansible_facts": { "__podman_quadlet_path": "/home/user_quadlet_pod/.config/containers/systemd" }, "changed": false } TASK [fedora.linux_system_roles.podman : Get kube yaml contents] *************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77 Monday 07 July 2025 20:17:37 -0400 (0:00:00.036) 0:01:05.557 *********** skipping: [managed-node2] => { "changed": false, "false_condition": "__podman_state != \"absent\"", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:88 Monday 07 July 2025 20:17:37 -0400 (0:00:00.029) 0:01:05.587 *********** ok: [managed-node2] => { "ansible_facts": { "__podman_images": [ "quay.io/libpod/testimage:20210610" ], "__podman_quadlet_file": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "__podman_volumes": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] *** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:106 Monday 07 July 2025 20:17:37 -0400 (0:00:00.075) 0:01:05.663 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Cleanup quadlets] ********************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:113 Monday 07 July 2025 20:17:37 -0400 (0:00:00.037) 0:01:05.701 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Stat XDG_RUNTIME_DIR] ***************** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:4 Monday 07 July 2025 20:17:38 -0400 (0:00:00.117) 0:01:05.818 *********** ok: [managed-node2] => { "changed": false, "stat": { "atime": 1751933818.6829095, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 0, "charset": "binary", "ctime": 1751933848.893101, "dev": 65, "device_type": 0, "executable": true, "exists": true, "gid": 2223, "gr_name": "user_quadlet_pod", "inode": 1, "isblk": false, "ischr": false, "isdir": true, "isfifo": false, "isgid": false, "islnk": false, "isreg": false, "issock": false, "isuid": false, "mimetype": "inode/directory", "mode": "0700", "mtime": 1751933848.893101, "nlink": 7, "path": "/run/user/2223", "pw_name": "user_quadlet_pod", "readable": true, "rgrp": false, "roth": false, "rusr": true, "size": 160, "uid": 2223, "version": null, "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": false, "xoth": false, "xusr": true } } TASK [fedora.linux_system_roles.podman : Stop and disable service] ************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:12 Monday 07 July 2025 20:17:38 -0400 (0:00:00.369) 0:01:06.188 *********** changed: [managed-node2] => { "changed": true, "enabled": false, "failed_when_result": false, "name": "quadlet-pod-container.service", "state": "stopped", "status": { "AccessSELinuxContext": 
"unconfined_u:object_r:user_tmp_t:s0", "ActiveEnterTimestamp": "Mon 2025-07-07 20:17:07 EDT", "ActiveEnterTimestampMonotonic": "853213205", "ActiveExitTimestamp": "Mon 2025-07-07 20:17:18 EDT", "ActiveExitTimestampMonotonic": "863891669", "ActiveState": "failed", "After": "-.mount app.slice basic.target run-user-2223.mount podman-user-wait-network-online.service quadlet-pod-pod-pod.service", "AllowIsolate": "no", "AssertResult": "yes", "AssertTimestamp": "Mon 2025-07-07 20:17:07 EDT", "AssertTimestampMonotonic": "853089335", "Before": "shutdown.target default.target", "BindsTo": "quadlet-pod-pod-pod.service", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "yes", "CPUAffinityFromNUMA": "no", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "[not set]", "CPUUsageNSec": "286274000", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanFreeze": "yes", "CanIsolate": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read cap_perfmon cap_bpf cap_checkpoint_restore", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "yes", "ConditionTimestamp": "Mon 2025-07-07 20:17:07 EDT", "ConditionTimestampMonotonic": "853089330", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroupId": "11933", "ControlPID": "0", "CoredumpFilter": "0x33", "DefaultDependencies": "yes", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "Delegate": "yes", "DelegateControllers": "cpu cpuacct cpuset io blkio memory devices pids bpf-firewall bpf-devices bpf-foreign bpf-socket-bind bpf-restrict-network-interfaces", "Description": "quadlet-pod-container.service", "DevicePolicy": "auto", "DynamicUser": "no", "Environment": "PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service", "ExecMainCode": "1", "ExecMainExitTimestamp": "Mon 2025-07-07 20:17:28 EDT", "ExecMainExitTimestampMonotonic": "874003228", "ExecMainPID": "62993", "ExecMainStartTimestamp": "Mon 2025-07-07 20:17:07 EDT", "ExecMainStartTimestampMonotonic": "853213135", "ExecMainStatus": "137", "ExecStart": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/user/2223/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/user/2223/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; ignore_errors=no ; start_time=[Mon 2025-07-07 20:17:07 EDT] ; stop_time=[Mon 2025-07-07 20:17:28 EDT] ; pid=62993 ; code=exited ; status=137 }", "ExecStartEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman run --name quadlet-pod-container --cidfile=/run/user/2223/quadlet-pod-container.cid --replace --rm --cgroups=split --sdnotify=conmon -d --pod-id-file /run/user/2223/quadlet-pod-pod-pod.pod-id quay.io/libpod/testimage:20210610 /bin/busybox-extras httpd -f -p 80 ; flags= ; start_time=[Mon 2025-07-07 
20:17:07 EDT] ; stop_time=[Mon 2025-07-07 20:17:28 EDT] ; pid=62993 ; code=exited ; status=137 }", "ExecStop": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; ignore_errors=no ; start_time=[Mon 2025-07-07 20:17:18 EDT] ; stop_time=[Mon 2025-07-07 20:17:28 EDT] ; pid=65129 ; code=exited ; status=0 }", "ExecStopEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; flags= ; start_time=[Mon 2025-07-07 20:17:18 EDT] ; stop_time=[Mon 2025-07-07 20:17:28 EDT] ; pid=65129 ; code=exited ; status=0 }", "ExecStopPost": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; ignore_errors=yes ; start_time=[Mon 2025-07-07 20:17:28 EDT] ; stop_time=[Mon 2025-07-07 20:17:28 EDT] ; pid=65167 ; code=exited ; status=0 }", "ExecStopPostEx": "{ path=/usr/bin/podman ; argv[]=/usr/bin/podman rm -v -f -i --cidfile=/run/user/2223/quadlet-pod-container.cid ; flags=ignore-failure ; start_time=[Mon 2025-07-07 20:17:28 EDT] ; stop_time=[Mon 2025-07-07 20:17:28 EDT] ; pid=65167 ; code=exited ; status=0 }", "ExitType": "main", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FinalKillSignal": "9", "FragmentPath": "/run/user/2223/systemd/generator/quadlet-pod-container.service", "FreezerState": "running", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": "18446744073709551615", "IOReadOperations": "18446744073709551615", "IOSchedulingClass": "2", "IOSchedulingPriority": "4", "IOWeight": "[not set]", "IOWriteBytes": "18446744073709551615", "IOWriteOperations": "18446744073709551615", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "quadlet-pod-container.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestamp": "Mon 2025-07-07 20:17:28 EDT", "InactiveEnterTimestampMonotonic": "874147604", "InactiveExitTimestamp": "Mon 2025-07-07 20:17:07 EDT", "InactiveExitTimestampMonotonic": "853099444", "InvocationID": "c1dce7ce163540689b9f034da0546997", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "inherit", "KillMode": "mixed", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": "infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "8388608", "LimitMEMLOCKSoft": "8388608", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "524288", "LimitNOFILESoft": "1024", "LimitNPROC": "13688", "LimitNPROCSoft": "13688", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "13688", "LimitSIGPENDINGSoft": "13688", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "0", "ManagedOOMMemoryPressure": "auto", "ManagedOOMMemoryPressureLimit": "0", "ManagedOOMPreference": "none", "ManagedOOMSwap": "auto", "MemoryAccounting": "yes", 
"MemoryAvailable": "infinity", "MemoryCurrent": "[not set]", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryLimit": "infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemorySwapMax": "infinity", "MountAPIVFS": "no", "NFileDescriptorStore": "0", "NRestarts": "0", "NUMAPolicy": "n/a", "Names": "quadlet-pod-container.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "all", "OOMPolicy": "continue", "OOMScoreAdjust": "200", "OnFailureJobMode": "replace", "OnSuccessJobMode": "fail", "Perpetual": "no", "PrivateDevices": "no", "PrivateIPC": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "PrivateUsers": "no", "ProcSubset": "all", "ProtectClock": "no", "ProtectControlGroups": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelLogs": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectProc": "default", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "ReloadSignal": "1", "RemainAfterExit": "no", "RemoveIPC": "no", "Requires": "app.slice basic.target", "RequiresMountsFor": "/run/user/2223/containers", "Restart": "no", "RestartKillSignal": "15", "RestartUSec": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "exit-code", "RootDirectoryStartOnly": "no", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "RuntimeRandomizedExtraUSec": "0", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "app.slice", "SourcePath": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "StandardError": "inherit", "StandardInput": "null", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": "[not set]", "StartupIOWeight": "[not set]", "StateChangeTimestamp": "Mon 2025-07-07 20:17:28 EDT", "StateChangeTimestampMonotonic": "874147604", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "failed", "SuccessAction": "none", "SyslogFacility": "3", "SyslogIdentifier": "quadlet-pod-container", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "SystemCallErrorNumber": "2147483646", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "[not set]", "TasksMax": "21900", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartFailureMode": "terminate", "TimeoutStartUSec": "1min 30s", "TimeoutStopFailureMode": "terminate", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "notify", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "generated", "UtmpMode": "init", "WantedBy": "default.target quadlet-pod-pod-pod.service", "Wants": "podman-user-wait-network-online.service", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "0", "WorkingDirectory": "!/home/user_quadlet_pod" } } TASK [fedora.linux_system_roles.podman : See if quadlet file exists] *********** task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:34 Monday 07 July 2025 20:17:39 -0400 (0:00:00.576) 0:01:06.764 *********** ok: [managed-node2] => { "changed": 
false, "stat": { "atime": 1751933827.1369631, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 8, "charset": "us-ascii", "checksum": "f0b5c8159fc3c65bf9310a371751609e4c1ba4c3", "ctime": 1751933826.5809596, "dev": 51713, "device_type": 0, "executable": false, "exists": true, "gid": 2223, "gr_name": "user_quadlet_pod", "inode": 41943287, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "text/plain", "mode": "0644", "mtime": 1751933826.324958, "nlink": 1, "path": "/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container", "pw_name": "user_quadlet_pod", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 230, "uid": 2223, "version": "2434089315", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": false, "xoth": false, "xusr": false } } TASK [fedora.linux_system_roles.podman : Parse quadlet file] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:39 Monday 07 July 2025 20:17:39 -0400 (0:00:00.362) 0:01:07.127 *********** included: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml for managed-node2 TASK [fedora.linux_system_roles.podman : Slurp quadlet file] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml:6 Monday 07 July 2025 20:17:39 -0400 (0:00:00.058) 0:01:07.185 *********** ok: [managed-node2] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Parse quadlet file] ******************* task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml:12 Monday 07 July 2025 20:17:39 -0400 (0:00:00.347) 0:01:07.533 *********** fatal: [managed-node2]: FAILED! => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result" } TASK [Dump journal] ************************************************************ task path: /tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:194 Monday 07 July 2025 20:17:39 -0400 (0:00:00.037) 0:01:07.571 *********** fatal: [managed-node2]: FAILED! 
=> { "changed": false, "cmd": [ "journalctl", "-ex" ], "delta": "0:00:00.038388", "end": "2025-07-07 20:17:40.162217", "failed_when_result": true, "rc": 0, "start": "2025-07-07 20:17:40.123829" } STDOUT: Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Received SIGHUP Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Successfully parsed config Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v4 ip {} Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v6 ip {} Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: No configuration found stopping the sever Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Called cleanup.PersistentPreRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Setting custom database backend: \"sqlite\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=info msg="Using sqlite as database backend" Jul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="systemd-logind: Unknown object '/'." 
Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using graph driver overlay" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using graph root /home/podman_basic_user/.local/share/containers/storage" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using run root /run/user/3001/containers" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using tmp dir /run/user/3001/libpod/tmp" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using transient store: false" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that metacopy is not being used" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Cached value indicated that native-diff is usable" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Initializing event backend file" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" Jul 
07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=info msg="Setting parallel job count to 7" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Called cleanup.PersistentPostRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time="2025-07-07T20:13:28-04:00" level=info msg="Received shutdown.Stop(), terminating!" PID=29684 Jul 07 20:13:28 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 83 and the job result is done. 
Jul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory Jul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory Jul 07 20:13:29 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory Jul 07 20:13:29 managed-node2 podman[29660]: Pods stopped: Jul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f Jul 07 20:13:29 managed-node2 podman[29660]: Pods removed: Jul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f Jul 07 20:13:29 managed-node2 podman[29660]: Secrets removed: Jul 07 20:13:29 managed-node2 podman[29660]: Volumes removed: Jul 07 20:13:29 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 84. Jul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 88. Jul 07 20:13:29 managed-node2 systemd[27808]: Started rootless-netns-281f12f7.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 92. Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:29 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:13:29 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state Jul 07 20:13:29 managed-node2 systemd[27808]: Started /usr/libexec/podman/aardvark-dns --config /run/user/3001/containers/networks/aardvark-dns -p 53 run. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 96. Jul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 100. 
Jul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 105. Jul 07 20:13:29 managed-node2 systemd[27808]: Started A template for running K8s workloads via podman-kube-play. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 70. Jul 07 20:13:29 managed-node2 podman[29660]: Pod: Jul 07 20:13:29 managed-node2 podman[29660]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4 Jul 07 20:13:29 managed-node2 podman[29660]: Container: Jul 07 20:13:29 managed-node2 podman[29660]: 98a702eb9e86b0efc7d3e6878bf2b4db5ac6ff3d0bc5383014d2958ce12dced5 Jul 07 20:13:29 managed-node2 sudo[29655]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:13:30 managed-node2 python3.9[29983]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:13:30 managed-node2 python3.9[30133]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:32 managed-node2 python3.9[30284]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:13:33 managed-node2 python3.9[30434]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:33 managed-node2 python3.9[30583]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
Jul 07 20:13:34 managed-node2 podman[30766]: 2025-07-07 20:13:34.583080898 -0400 EDT m=+0.387686363 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:35 managed-node2 python3.9[30929]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:35 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:13:35 managed-node2 python3.9[31078]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:35 managed-node2 python3.9[31227]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:13:36 managed-node2 python3.9[31347]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd2.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933615.542873-13335-149986577229905/.source.yml _original_basename=.op5axps4 follow=False checksum=ce164467a3a112a82832f62e0fdfcaf3c7eecdd1 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:36 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.614627171 -0400 EDT m=+0.024918635 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:13:36 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice. ░░ Subject: A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully. ░░ ░░ The job identifier is 1383. 
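The podman_play task above (state=started, kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml) wraps podman's kube play; a rough manual equivalent for reproducing or checking the result, using the pod and container names seen in this run (the exact PODMAN-PLAY-KUBE command appears later in the log):

    # Roughly what the module runs under the hood
    podman play kube --start=true /etc/containers/ansible-kubernetes.d/httpd2.yml
    podman pod ps          # expect a pod named httpd2
    podman ps --pod        # expect a89535868ec0-infra and httpd2-httpd2 in that pod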
Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.657483611 -0400 EDT m=+0.067774829 container create b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.663455669 -0400 EDT m=+0.073746755 pod create a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.690624127 -0400 EDT m=+0.100915321 container create f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, app=test, io.containers.autoupdate=registry, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7101] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/3) Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:36 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.667763108 -0400 EDT m=+0.078054303 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:36 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7230] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4) Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7287] device (veth0): carrier: link connected Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7289] device (podman1): carrier: link connected Jul 07 20:13:36 managed-node2 systemd-udevd[31526]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:13:36 managed-node2 systemd-udevd[31524]: Network interface NamePolicy= disabled on kernel command line. 
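At this point podman has created the podman-default-kube-network bridge (podman1) plus a veth pair for the infra container, and NetworkManager only observes them as external devices. A quick way to inspect that network, assuming the names shown above:

    # Subnet, gateway and DNS settings of the kube network (10.89.0.0/24, gw 10.89.0.1)
    podman network inspect podman-default-kube-network
    # NetworkManager should list podman1/veth0 as externally managed devices
    nmcli device status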
Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7661] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7667] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7674] device (podman1): Activation: starting connection 'podman1' (eac731d7-3726-4468-a790-cf1c7402dd92) Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7676] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7680] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7682] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7686] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1388. Jul 07 20:13:36 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1388. Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7916] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7918] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7923] device (podman1): Activation: successful, device activated. Jul 07 20:13:36 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run. ░░ Subject: A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully. ░░ ░░ The job identifier is 1454. Jul 07 20:13:36 managed-node2 aardvark-dns[31613]: starting aardvark on a child with pid 31614 Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Successfully parsed config Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v4 ip {"podman-default-kube-network": [10.89.0.1]} Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v6 ip {} Jul 07 20:13:36 managed-node2 aardvark-dns[31614]: Using the following upstream servers: [10.29.169.13:53, 10.29.170.12:53, 10.2.32.1:53] Jul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope. 
░░ Subject: A start job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully. ░░ ░░ The job identifier is 1458. Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/12/attach} Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : terminal_ctrl_fd: 12 Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : winsz read side: 16, winsz write side: 17 Jul 07 20:13:36 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully. ░░ ░░ The job identifier is 1463. Jul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : container PID: 31620 Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.960118446 -0400 EDT m=+0.370409669 container init b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.963833697 -0400 EDT m=+0.374124951 container start b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope. ░░ Subject: A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully. ░░ ░░ The job identifier is 1468. Jul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/11/attach} Jul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : terminal_ctrl_fd: 11 Jul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : winsz read side: 15, winsz write side: 16 Jul 07 20:13:37 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully. ░░ ░░ The job identifier is 1473. 
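Each container in the pod gets its own conmon scope (libpod-conmon-<id>.scope) and runtime scope (libpod-<id>.scope) under the pod's machine-libpod_pod_*.slice cgroup. One way to see that hierarchy while the pod is running, as a sketch:

    # Show the pod slice with its conmon and libcrun scopes
    systemd-cgls machine.slice
    systemctl status machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice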
Jul 07 20:13:37 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container PID: 31625 Jul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.017137848 -0400 EDT m=+0.427429053 container init f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.020217845 -0400 EDT m=+0.430509047 container start f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.026166839 -0400 EDT m=+0.436457992 pod start a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pod: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 Container: f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time="2025-07-07T20:13:36-04:00" level=info msg="/usr/bin/podman filtering at log level debug" time="2025-07-07T20:13:36-04:00" level=debug msg="Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)" time="2025-07-07T20:13:36-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" time="2025-07-07T20:13:36-04:00" level=info msg="Using sqlite as database backend" time="2025-07-07T20:13:36-04:00" level=debug msg="Using graph driver overlay" time="2025-07-07T20:13:36-04:00" level=debug msg="Using graph root /var/lib/containers/storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Using run root /run/containers/storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" time="2025-07-07T20:13:36-04:00" level=debug msg="Using tmp dir /run/libpod" time="2025-07-07T20:13:36-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" time="2025-07-07T20:13:36-04:00" level=debug msg="Using transient store: false" time="2025-07-07T20:13:36-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that metacopy is being used" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that native-diff is not being used" time="2025-07-07T20:13:36-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has 
CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" time="2025-07-07T20:13:36-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" time="2025-07-07T20:13:36-04:00" level=debug msg="Initializing event backend journald" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" time="2025-07-07T20:13:36-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" time="2025-07-07T20:13:36-04:00" level=info msg="Setting parallel job count to 7" time="2025-07-07T20:13:36-04:00" level=debug msg="Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}" time="2025-07-07T20:13:36-04:00" level=debug msg="Successfully loaded 2 networks" time="2025-07-07T20:13:36-04:00" level=debug msg="Pod using bridge network mode" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="no command or entrypoint provided, and no CMD or ENTRYPOINT from image: defaulting to empty string" time="2025-07-07T20:13:36-04:00" level=debug msg="using systemd mode: false" time="2025-07-07T20:13:36-04:00" level=debug msg="setting container name a89535868ec0-infra" time="2025-07-07T20:13:36-04:00" level=debug msg="Loading seccomp profile from \"/usr/share/containers/seccomp.json\"" 
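The debug lines above show podman probing its configured OCI runtimes (runc, kata, runsc, krun, ...) and falling back to crun, the only one installed. The effective runtime can be confirmed without reading debug logs, for example:

    # Which OCI runtime podman actually uses on this host
    podman info --format '{{.Host.OCIRuntime.Name}} {{.Host.OCIRuntime.Path}}'
    # expected here: crun /usr/bin/crun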
time="2025-07-07T20:13:36-04:00" level=debug msg="Allocated lock 1 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="Cached value indicated that idmapped mounts for overlay are supported" time="2025-07-07T20:13:36-04:00" level=debug msg="Created container \"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\" has work directory \"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\" has run directory \"/run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Looking up image \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Normalized platform linux/amd64 to {amd64 linux [] }" time="2025-07-07T20:13:36-04:00" level=debug msg="Trying \"quay.io/libpod/testimage:20210610\" ..." time="2025-07-07T20:13:36-04:00" level=debug msg="parsed reference into \"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Pulling image quay.io/libpod/testimage:20210610 (policy: missing)" time="2025-07-07T20:13:36-04:00" level=debug msg="Looking up image \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Normalized platform linux/amd64 to {amd64 linux [] }" time="2025-07-07T20:13:36-04:00" level=debug msg="Trying \"quay.io/libpod/testimage:20210610\" ..." 
time="2025-07-07T20:13:36-04:00" level=debug msg="parsed reference into \"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Looking up image \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Normalized platform linux/amd64 to {amd64 linux [] }" time="2025-07-07T20:13:36-04:00" level=debug msg="Trying \"quay.io/libpod/testimage:20210610\" ..." time="2025-07-07T20:13:36-04:00" level=debug msg="parsed reference into \"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage" time="2025-07-07T20:13:36-04:00" level=debug msg="Found image \"quay.io/libpod/testimage:20210610\" as \"quay.io/libpod/testimage:20210610\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f" time="2025-07-07T20:13:36-04:00" level=debug msg="using systemd mode: false" time="2025-07-07T20:13:36-04:00" level=debug msg="adding container to pod httpd2" time="2025-07-07T20:13:36-04:00" level=debug msg="setting container name httpd2-httpd2" 
time="2025-07-07T20:13:36-04:00" level=debug msg="Loading seccomp profile from \"/usr/share/containers/seccomp.json\"" time="2025-07-07T20:13:36-04:00" level=info msg="Sysctl net.ipv4.ping_group_range=0 0 ignored in containers.conf, since Network Namespace set to host" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /proc" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /dev" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /dev/pts" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /dev/mqueue" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /sys" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding mount /sys/fs/cgroup" time="2025-07-07T20:13:36-04:00" level=debug msg="Allocated lock 2 for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" time="2025-07-07T20:13:36-04:00" level=debug msg="exporting opaque data as blob \"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Created container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\" has work directory \"/var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\" has run directory \"/run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Strongconnecting node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="Pushed b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 onto stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Finishing node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2. Popped b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 off stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Strongconnecting node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" time="2025-07-07T20:13:36-04:00" level=debug msg="Pushed f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 onto stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Finishing node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06. Popped f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 off stack" time="2025-07-07T20:13:36-04:00" level=debug msg="Made network namespace at /run/netns/netns-3dcd885d-1b51-2e38-72ff-33596f02c329 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" [DEBUG netavark::network::validation] Validating network namespace... [DEBUG netavark::commands::setup] Setting up... 
[INFO netavark::firewall] Using iptables firewall driver [DEBUG netavark::network::bridge] Setup network podman-default-kube-network [DEBUG netavark::network::bridge] Container interface name: eth0 with IP addresses [10.89.0.2/24] [DEBUG netavark::network::bridge] Bridge name: podman1 with IP addresses [10.89.0.1/24] [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.ip_forward to 1 [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/podman1/rp_filter to 2 time="2025-07-07T20:13:36-04:00" level=debug msg="Created root filesystem for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge" [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv6/conf/eth0/autoconf to 0 [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/arp_notify to 1 [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/rp_filter to 2 [INFO netavark::network::netlink] Adding route (dest: 0.0.0.0/0 ,gw: 10.89.0.1, metric 100) [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-4B9D9135B29BA created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_2 created on table filter [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_3 created on table filter [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_INPUT created on table filter [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_FORWARD created on table filter [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -j ACCEPT created on table nat and chain NETAVARK-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule ! 
-d 224.0.0.0/4 -j MASQUERADE created on table nat and chain NETAVARK-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j NETAVARK-4B9D9135B29BA created on table nat and chain POSTROUTING [DEBUG netavark::firewall::varktables::helpers] rule -p udp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT [DEBUG netavark::firewall::varktables::helpers] rule -p tcp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT [DEBUG netavark::firewall::varktables::helpers] rule -m conntrack --ctstate INVALID -j DROP created on table filter and chain NETAVARK_FORWARD [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT created on table filter and chain NETAVARK_FORWARD [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j ACCEPT created on table filter and chain NETAVARK_FORWARD [DEBUG netavark::firewall::firewalld] Adding firewalld rules for network 10.89.0.0/24 [DEBUG netavark::firewall::firewalld] Adding subnet 10.89.0.0/24 to zone trusted as source [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.conf.podman1.route_localnet to 1 [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-SETMARK created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-MASQ created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-DN-4B9D9135B29BA created on table nat [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-DNAT created on table nat [DEBUG netavark::firewall::varktables::helpers] rule -j MARK --set-xmark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-SETMARK [DEBUG netavark::firewall::varktables::helpers] rule -j MASQUERADE -m comment --comment 'netavark portfw masq mark' -m mark --mark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-MASQ [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 10.89.0.0/24 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 127.0.0.1 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -j DNAT -p tcp --to-destination 10.89.0.2:80 --destination-port 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-DN-4B9D9135B29BA -p tcp --dport 15002 -m comment --comment 'dnat name: podman-default-kube-network id: b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2' created on table nat and chain NETAVARK-HOSTPORT-DNAT [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain PREROUTING [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain OUTPUT [DEBUG netavark::dns::aardvark] Spawning aardvark server [DEBUG netavark::dns::aardvark] start aardvark-dns: ["systemd-run", "-q", "--scope", "/usr/libexec/podman/aardvark-dns", "--config", "/run/containers/networks/aardvark-dns", "-p", "53", "run"] [DEBUG netavark::commands::setup] { "podman-default-kube-network": StatusBlock { dns_search_domains: Some( [ "dns.podman", ], ), dns_server_ips: Some( [ 10.89.0.1, ], ), interfaces: Some( { "eth0": 
NetInterface { mac_address: "ce:5c:7c:33:0d:65", subnets: Some( [ NetAddress { gateway: Some( 10.89.0.1, ), ipnet: 10.89.0.2/24, }, ], ), }, }, ), }, } [DEBUG netavark::commands::setup] Setup complete time="2025-07-07T20:13:36-04:00" level=debug msg="/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts" time="2025-07-07T20:13:36-04:00" level=debug msg="Setting Cgroups for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="reading hooks from /usr/share/containers/oci/hooks.d" time="2025-07-07T20:13:36-04:00" level=debug msg="Workdir \"/\" resolved to host path \"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Created OCI spec for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/config.json" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="/usr/bin/conmon messages will be logged to syslog" time="2025-07-07T20:13:36-04:00" level=debug msg="running conmon: /usr/bin/conmon" args="[--api-version 1 -c b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -u b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata -p /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/pidfile -n a89535868ec0-infra --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime 
--exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2]" time="2025-07-07T20:13:36-04:00" level=info msg="Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope" time="2025-07-07T20:13:36-04:00" level=debug msg="Received: 31620" time="2025-07-07T20:13:36-04:00" level=info msg="Got Conmon PID as 31618" time="2025-07-07T20:13:36-04:00" level=debug msg="Created container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 in OCI runtime" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding nameserver(s) from network status of '[\"10.89.0.1\"]'" time="2025-07-07T20:13:36-04:00" level=debug msg="Adding search domain(s) from network status of '[\"dns.podman\"]'" time="2025-07-07T20:13:36-04:00" level=debug msg="Starting container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 with command [/catatonit -P]" time="2025-07-07T20:13:36-04:00" level=debug msg="Started container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2" time="2025-07-07T20:13:36-04:00" level=debug msg="overlay: mount_data=lowerdir=/var/lib/containers/storage/overlay/l/BSXLDW6S4QQFLDJH6Z45ODLX6A,upperdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/diff,workdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/work,nodev,metacopy=on,context=\"system_u:object_r:container_file_t:s0:c263,c753\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Mounted container \"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\" at \"/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged\"" time="2025-07-07T20:13:36-04:00" level=debug msg="Created root filesystem for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged" time="2025-07-07T20:13:36-04:00" level=debug msg="/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts" time="2025-07-07T20:13:36-04:00" level=debug msg="Setting Cgroups for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" time="2025-07-07T20:13:36-04:00" level=debug msg="reading hooks from /usr/share/containers/oci/hooks.d" time="2025-07-07T20:13:36-04:00" level=debug msg="Workdir \"/var/www\" resolved to a volume or mount" time="2025-07-07T20:13:36-04:00" level=debug msg="Created OCI spec for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/config.json" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup path 
machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5" time="2025-07-07T20:13:36-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice" time="2025-07-07T20:13:36-04:00" level=debug msg="/usr/bin/conmon messages will be logged to syslog" time="2025-07-07T20:13:36-04:00" level=debug msg="running conmon: /usr/bin/conmon" args="[--api-version 1 -c f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -u f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata -p /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/pidfile -n httpd2-httpd2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06]" time="2025-07-07T20:13:36-04:00" level=info msg="Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope" time="2025-07-07T20:13:37-04:00" level=debug msg="Received: 31625" time="2025-07-07T20:13:37-04:00" level=info msg="Got Conmon PID as 31623" time="2025-07-07T20:13:37-04:00" level=debug msg="Created container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 in OCI runtime" time="2025-07-07T20:13:37-04:00" level=debug msg="Starting container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 with command [/bin/busybox-extras httpd -f -p 80]" time="2025-07-07T20:13:37-04:00" level=debug msg="Started container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06" 
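With the httpd2-httpd2 container started (busybox-extras httpd on port 80) and the NETAVARK-DN DNAT rule mapping host port 15002 to 10.89.0.2:80, the published port should now answer from the host. A hedged reachability check, assuming the port mapping shown in the netavark rules above:

    # Hit the published port (DNAT 15002 -> 10.89.0.2:80 per the rules above)
    curl -sv http://127.0.0.1:15002/ -o /dev/null
    # Inspect the NAT chain netavark created for the port forward
    iptables -t nat -S NETAVARK-HOSTPORT-DNAT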
time="2025-07-07T20:13:37-04:00" level=debug msg="Called kube.PersistentPostRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)" time="2025-07-07T20:13:37-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0 Jul 07 20:13:37 managed-node2 python3.9[31775]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:13:37 managed-node2 systemd[1]: Reloading. Jul 07 20:13:37 managed-node2 systemd-rc-local-generator[31792]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:38 managed-node2 python3.9[31958]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None Jul 07 20:13:38 managed-node2 systemd[1]: Reloading. Jul 07 20:13:38 managed-node2 systemd-rc-local-generator[31978]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:39 managed-node2 python3.9[32143]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:13:39 managed-node2 systemd[1]: Created slice Slice /system/podman-kube. ░░ Subject: A start job for unit system-podman\x2dkube.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit system-podman\x2dkube.slice has finished successfully. ░░ ░░ The job identifier is 1479. Jul 07 20:13:39 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play... ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution. ░░ ░░ The job identifier is 1478. Jul 07 20:13:39 managed-node2 podman[32147]: 2025-07-07 20:13:39.145431853 -0400 EDT m=+0.025527029 pod stop a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:46 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 podman[32147]: time="2025-07-07T20:13:49-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL" Jul 07 20:13:49 managed-node2 systemd[1]: libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state. 
Jul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container 31625 exited with status 137 Jul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice/libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope/container/memory.events Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.174232474 -0400 EDT m=+10.054327720 container died f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Setting custom database backend: \"sqlite\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Using sqlite as database backend" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph driver overlay" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph root /var/lib/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using run root /run/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using tmp dir /run/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using transient store: false" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that metacopy is being used" Jul 07 20:13:49 
managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that native-diff is not being used" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Initializing event backend journald" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Setting parallel job count to 7" Jul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount has successfully entered the 'dead' state. 
Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.219658211 -0400 EDT m=+10.099753380 container cleanup f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test) Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time="2025-07-07T20:13:49-04:00" level=info msg="Received shutdown.Stop(), terminating!" PID=32158 Jul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.241852628 -0400 EDT m=+10.121948152 container died b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra) Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Received SIGHUP Jul 07 20:13:49 managed-node2 systemd[1]: run-rf8a9b32703c44fe9919a21200707a783.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-rf8a9b32703c44fe9919a21200707a783.scope has successfully entered the 'dead' state. 
Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Successfully parsed config Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v4 ip {} Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v6 ip {} Jul 07 20:13:49 managed-node2 aardvark-dns[31614]: No configuration found stopping the sever Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Setting custom database backend: \"sqlite\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Using sqlite as database backend" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph driver overlay" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using graph root /var/lib/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using run root /run/containers/storage" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using tmp dir /run/libpod" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using transient store: false" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that overlay is supported" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that metacopy is being used" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Cached value indicated that native-diff is not being used" Jul 07 20:13:49 managed-node2 
/usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Initializing event backend journald" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Setting parallel job count to 7" Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.2949] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jul 07 20:13:49 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1551. Jul 07 20:13:49 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. 
░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1551. Jul 07 20:13:49 managed-node2 systemd[1]: run-netns-netns\x2d3dcd885d\x2d1b51\x2d2e38\x2d72ff\x2d33596f02c329.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d3dcd885d\x2d1b51\x2d2e38\x2d72ff\x2d33596f02c329.mount has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.470731214 -0400 EDT m=+10.350826660 container cleanup b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=debug msg="Shutting down engines" Jul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time="2025-07-07T20:13:49-04:00" level=info msg="Received shutdown.Stop(), terminating!" PID=32170 Jul 07 20:13:49 managed-node2 systemd[1]: Stopping libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope... ░░ Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution. ░░ ░░ The job identifier is 1618. Jul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state. Jul 07 20:13:49 managed-node2 systemd[1]: Stopped libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope. 
░░ Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished. ░░ ░░ The job identifier is 1618 and the job result is done. Jul 07 20:13:49 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice. ░░ Subject: A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished. ░░ ░░ The job identifier is 1617 and the job result is done. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.543096205 -0400 EDT m=+10.423191413 container remove f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.569252202 -0400 EDT m=+10.449347410 container remove b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5) Jul 07 20:13:49 managed-node2 systemd[1]: machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: Failed to open /run/systemd/transient/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: No such file or directory Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577427834 -0400 EDT m=+10.457523002 pod remove a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2) Jul 07 20:13:49 managed-node2 podman[32147]: Pods stopped: Jul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 Jul 07 20:13:49 managed-node2 podman[32147]: Pods removed: Jul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 Jul 07 20:13:49 managed-node2 podman[32147]: Secrets removed: Jul 07 20:13:49 managed-node2 podman[32147]: Volumes removed: Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577733569 -0400 EDT m=+10.457828934 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.601543415 -0400 EDT m=+10.481638618 container create 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice. 
░░ Subject: A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully. ░░ ░░ The job identifier is 1619. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.643353432 -0400 EDT m=+10.523448629 container create 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.649232898 -0400 EDT m=+10.529328325 pod create d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675477572 -0400 EDT m=+10.555572747 container create 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, created_at=2021-06-10T18:55:36Z) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675792512 -0400 EDT m=+10.555887719 container restart 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.651020296 -0400 EDT m=+10.531115614 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully. ░░ ░░ The job identifier is 1623. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.731153306 -0400 EDT m=+10.611248537 container init 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.734097189 -0400 EDT m=+10.614192506 container start 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7517] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/5) Jul 07 20:13:49 managed-node2 systemd-udevd[32184]: Network interface NamePolicy= disabled on kernel command line. 
Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:13:49 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:13:49 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:13:49 managed-node2 systemd-udevd[32189]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7610] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/6) Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state Jul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7658] device (veth0): carrier: link connected Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7662] device (podman1): carrier: link connected Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7742] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7747] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7759] device (podman1): Activation: starting connection 'podman1' (9a09baee-577d-45df-991f-e577871fe999) Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7761] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7766] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7769] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7773] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7875] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7879] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7899] device (podman1): Activation: successful, device activated. Jul 07 20:13:49 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run. ░░ Subject: A start job for unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully. ░░ ░░ The job identifier is 1627. Jul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully. ░░ ░░ The job identifier is 1631. 
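By this point netavark has recreated the podman1 bridge and veth0 for podman-default-kube-network, NetworkManager has assumed the bridge as an external device and walked it to the activated state, and aardvark-dns has been started on port 53 to serve container name resolution for the network. The network definition the pods attach to can be dumped directly; the play below is a sketch, and the network_interface / dns_enabled keys are assumptions about podman's network-inspect JSON rather than values taken from this log:

---
# Sketch: dump the default kube-play network the pods above attach to.
- hosts: managed-node2
  gather_facts: false
  tasks:
    - name: Inspect podman-default-kube-network
      ansible.builtin.command: podman network inspect podman-default-kube-network
      register: kube_net
      changed_when: false

    - name: Show bridge interface and DNS flag (keys are assumptions)
      ansible.builtin.debug:
        msg: >-
          bridge={{ (kube_net.stdout | from_json)[0].network_interface | default('n/a') }}
          dns_enabled={{ (kube_net.stdout | from_json)[0].dns_enabled | default('n/a') }}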
Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.929395798 -0400 EDT m=+10.809491054 container init 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.932349651 -0400 EDT m=+10.812444917 container start 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully. ░░ ░░ The job identifier is 1636. Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.975052109 -0400 EDT m=+10.855147428 container init 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.978227355 -0400 EDT m=+10.858322532 container start 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.984086055 -0400 EDT m=+10.864181258 pod start d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:13:49 managed-node2 systemd[1]: Started A template for running K8s workloads via podman-kube-play. ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished successfully. ░░ ░░ The job identifier is 1478. Jul 07 20:13:49 managed-node2 podman[32147]: Pod: Jul 07 20:13:49 managed-node2 podman[32147]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 Jul 07 20:13:49 managed-node2 podman[32147]: Container: Jul 07 20:13:49 managed-node2 podman[32147]: 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 Jul 07 20:13:50 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:13:51 managed-node2 python3.9[32468]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:52 managed-node2 python3.9[32619]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:13:53 managed-node2 python3.9[32769]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:53 managed-node2 python3.9[32918]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:54 managed-node2 podman[33098]: 2025-07-07 20:13:54.593475273 -0400 EDT m=+0.280680463 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:55 managed-node2 python3.9[33262]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:13:55 managed-node2 python3.9[33411]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:55 managed-node2 python3.9[33560]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:13:56 managed-node2 python3.9[33680]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd3.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933635.6006646-13961-107071336953239/.source.yml _original_basename=._73jk67j follow=False checksum=5b3685de46cacb0a0661419a5a5898cbb3cf431c backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:13:56 managed-node2 python3.9[33829]: ansible-containers.podman.podman_play Invoked with state=started 
kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.647468979 -0400 EDT m=+0.017382508 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:13:56 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice. ░░ Subject: A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully. ░░ ░░ The job identifier is 1641. Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.687774841 -0400 EDT m=+0.057688390 container create f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.693778889 -0400 EDT m=+0.063692415 pod create 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.718042148 -0400 EDT m=+0.087955671 container create d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry) Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:13:56 managed-node2 kernel: veth1: entered allmulticast mode Jul 07 20:13:56 managed-node2 kernel: veth1: entered promiscuous mode Jul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7464] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/7) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.695533824 -0400 EDT m=+0.065447531 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state Jul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7489] device (veth1): carrier: link connected Jul 07 20:13:56 managed-node2 systemd-udevd[33860]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope. 
░░ Subject: A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully. ░░ ░░ The job identifier is 1646. Jul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully. ░░ ░░ The job identifier is 1651. Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.874054258 -0400 EDT m=+0.243967914 container init f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.877423946 -0400 EDT m=+0.247337594 container start f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope. ░░ Subject: A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully. ░░ ░░ The job identifier is 1656. Jul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully. ░░ ░░ The job identifier is 1661. 
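The module calls recorded a few entries back trace the role's system-scope deployment path for httpd3: systemd-escape derives the podman-kube@ instance name from the kube file path, the YAML is copied to /etc/containers/ansible-kubernetes.d/httpd3.yml, containers.podman.podman_play brings the pod up once, and the podman-kube@ template instance is then enabled and started (visible in the entries that follow). Condensed into a standalone play it looks roughly like the sketch below; this is illustrative, not the role's actual task file, and the copy source is a placeholder:

---
# Sketch of the system-scope kube deployment steps seen in the journal above.
- hosts: managed-node2
  gather_facts: false
  vars:
    kube_file: /etc/containers/ansible-kubernetes.d/httpd3.yml
  tasks:
    - name: Derive the podman-kube@ instance name from the kube file path
      ansible.builtin.command: systemd-escape --template podman-kube@.service {{ kube_file }}
      register: kube_unit
      changed_when: false

    - name: Install the kube spec (source path is a placeholder)
      ansible.builtin.copy:
        src: httpd3.yml
        dest: "{{ kube_file }}"
        owner: root
        group: "0"
        mode: "0644"

    - name: Bring the pod up once with podman kube play
      containers.podman.podman_play:
        kube_file: "{{ kube_file }}"
        state: started

    - name: Enable and start the systemd template instance
      ansible.builtin.systemd:
        name: "{{ kube_unit.stdout }}"
        enabled: true
        state: started
        daemon_reload: true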
Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.931183676 -0400 EDT m=+0.301097308 container init d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.93420031 -0400 EDT m=+0.304113971 container start d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test) Jul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.940215927 -0400 EDT m=+0.310129474 pod start 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:13:57 managed-node2 python3.9[34065]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:13:57 managed-node2 systemd[1]: Reloading. Jul 07 20:13:57 managed-node2 systemd-rc-local-generator[34083]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:58 managed-node2 python3.9[34248]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None Jul 07 20:13:58 managed-node2 systemd[1]: Reloading. Jul 07 20:13:58 managed-node2 systemd-rc-local-generator[34268]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:13:59 managed-node2 python3.9[34433]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:13:59 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play... ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution. ░░ ░░ The job identifier is 1666. Jul 07 20:13:59 managed-node2 podman[34437]: 2025-07-07 20:13:59.128818064 -0400 EDT m=+0.031043200 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:13:59 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: time="2025-07-07T20:14:09-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL" Jul 07 20:14:09 managed-node2 systemd[1]: libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.161891677 -0400 EDT m=+10.064117231 container died d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z) Jul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.208209429 -0400 EDT m=+10.110434520 container cleanup d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.228424072 -0400 EDT m=+10.130649401 container died f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra) Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left allmulticast mode Jul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left promiscuous mode Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:14:09 managed-node2 systemd[1]: run-netns-netns\x2db10132db\x2d5af1\x2d0f8c\x2d38ab\x2d1e8eaa97e6f2.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2db10132db\x2d5af1\x2d0f8c\x2d38ab\x2d1e8eaa97e6f2.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.330607359 -0400 EDT m=+10.232832448 container cleanup f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state. Jul 07 20:14:09 managed-node2 systemd[1]: Stopped libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope. ░░ Subject: A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished. ░░ ░░ The job identifier is 1740 and the job result is done. Jul 07 20:14:09 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice. ░░ Subject: A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished. ░░ ░░ The job identifier is 1739 and the job result is done. 
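The StopSignal warning above ("failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL") is expected here: the test container does not stop on SIGTERM within the stop timeout (10 seconds by default), so podman escalates to SIGKILL while the podman-kube@ unit replaces the pod that podman_play had started. The effective timeout can be read back from the container's configuration; a sketch, with Config.StopTimeout assumed from podman's inspect JSON:

---
# Sketch: read the stop timeout podman honours before escalating to SIGKILL.
- hosts: managed-node2
  gather_facts: false
  tasks:
    - name: Inspect the httpd3 application container
      ansible.builtin.command: podman container inspect httpd3-httpd3 --format json
      register: httpd3_inspect
      changed_when: false

    - name: Show the configured stop timeout in seconds (field path is an assumption)
      ansible.builtin.debug:
        msg: "StopTimeout: {{ (httpd3_inspect.stdout | from_json)[0].Config.StopTimeout | default(10) }}"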
Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.33777977 -0400 EDT m=+10.240004889 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.341042664 -0400 EDT m=+10.243267751 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.366974147 -0400 EDT m=+10.269199273 container remove d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.392885411 -0400 EDT m=+10.295110535 container remove f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7) Jul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.400728494 -0400 EDT m=+10.302953580 pod remove 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 podman[34437]: Pods stopped: Jul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 Jul 07 20:14:09 managed-node2 podman[34437]: Pods removed: Jul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 Jul 07 20:14:09 managed-node2 podman[34437]: Secrets removed: Jul 07 20:14:09 managed-node2 podman[34437]: Volumes removed: Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.40084298 -0400 EDT m=+10.303068230 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.421632285 -0400 EDT m=+10.323857401 container create eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice. 
░░ Subject: A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished successfully. ░░ ░░ The job identifier is 1741. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.456874206 -0400 EDT m=+10.359099322 container create e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.463729321 -0400 EDT m=+10.365954523 pod create 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.466690532 -0400 EDT m=+10.368915893 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495205758 -0400 EDT m=+10.397430876 container create 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, created_at=2021-06-10T18:55:36Z) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495540708 -0400 EDT m=+10.397765831 container restart eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully. ░░ ░░ The job identifier is 1745. 
Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.541938993 -0400 EDT m=+10.444164111 container init eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.54556115 -0400 EDT m=+10.447786446 container start eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:14:09 managed-node2 kernel: veth1: entered allmulticast mode Jul 07 20:14:09 managed-node2 kernel: veth1: entered promiscuous mode Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state Jul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state Jul 07 20:14:09 managed-node2 NetworkManager[644]: [1751933649.5632] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/8) Jul 07 20:14:09 managed-node2 NetworkManager[644]: [1751933649.5684] device (veth1): carrier: link connected Jul 07 20:14:09 managed-node2 systemd-udevd[34477]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully. ░░ ░░ The job identifier is 1749. Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.678444807 -0400 EDT m=+10.580670010 container init e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.681382862 -0400 EDT m=+10.583608134 container start e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully. ░░ ░░ The job identifier is 1754. 
Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.728795591 -0400 EDT m=+10.631020730 container init 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, app=test) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.73177019 -0400 EDT m=+10.633995385 container start 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage) Jul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.737566302 -0400 EDT m=+10.639791423 pod start 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:14:09 managed-node2 podman[34437]: Pod: Jul 07 20:14:09 managed-node2 podman[34437]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 Jul 07 20:14:09 managed-node2 podman[34437]: Container: Jul 07 20:14:09 managed-node2 podman[34437]: 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 Jul 07 20:14:09 managed-node2 systemd[1]: Started A template for running K8s workloads via podman-kube-play. ░░ Subject: A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished successfully. ░░ ░░ The job identifier is 1666. Jul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:10 managed-node2 sudo[34704]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cflwckudrftyvniytbtokrawwncefyyk ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933650.260143-14379-210639111977054/AnsiballZ_command.py' Jul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:10 managed-node2 python3.9[34706]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:10 managed-node2 systemd[27808]: Started podman-34715.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 110. 
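With the httpd3 unit active, the run switches to verification: podman pod inspect reports each pod's state, systemctl list-units output is grepped for the active podman-kube@ instances (user scope for httpd1, system scope for httpd2 and httpd3), and HTTP requests are made against the published ports. The same checks, condensed into a sketch for a single pod; the !unsafe tag only keeps Jinja from interpreting podman's Go template, and port 15001 is simply one of the ports the test probes (the pod-to-port mapping is not asserted here):

---
# Sketch: the verification steps the test performs, shown for one pod.
- hosts: managed-node2
  gather_facts: false
  tasks:
    - name: Pod must report Running
      ansible.builtin.command: !unsafe podman pod inspect httpd3 --format '{{.State}}'
      register: pod_state
      changed_when: false
      failed_when: "'Running' not in pod_state.stdout"

    - name: System-scope podman-kube@ instance for httpd3 must be loaded and active
      ansible.builtin.shell: |
        set -euo pipefail
        systemctl --system list-units -a -l --plain | grep -E 'podman-kube@.+-httpd3[.]yml[.]service +loaded +active'
      changed_when: false

    - name: A published port must serve the test page
      ansible.builtin.uri:
        url: http://localhost:15001/index.txt
        return_content: true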
Jul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:10 managed-node2 python3.9[34872]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:11 managed-node2 python3.9[35029]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:11 managed-node2 sudo[35186]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utoemynxahtksrgkxmppktxcnibfjzhy ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933651.5642703-14440-50388675366557/AnsiballZ_command.py' Jul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:11 managed-node2 python3.9[35188]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:12 managed-node2 python3.9[35340]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:12 managed-node2 python3.9[35492]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:13 managed-node2 python3.9[35644]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:13 managed-node2 python3.9[35795]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True 
force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:14 managed-node2 python3.9[35945]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd1-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:14 managed-node2 python3.9[36095]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd2-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:14 managed-node2 python3.9[36245]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd3-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:17 managed-node2 python3.9[36544]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:18 managed-node2 python3.9[36699]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:21 managed-node2 python3.9[36850]: ansible-ansible.legacy.dnf Invoked with name=['firewalld'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:23 managed-node2 python3.9[37000]: ansible-systemd Invoked with name=firewalld masked=False daemon_reload=False daemon_reexec=False scope=system no_block=False state=None enabled=None force=None Jul 07 20:14:23 managed-node2 python3.9[37151]: ansible-ansible.legacy.systemd Invoked with name=firewalld enabled=True state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None Jul 07 20:14:24 managed-node2 python3.9[37302]: ansible-fedora.linux_system_roles.firewall_lib Invoked with port=['15001-15003/tcp'] permanent=True runtime=True state=enabled __report_changed=True online=True service=[] source_port=[] forward_port=[] rich_rule=[] source=[] interface=[] interface_pci_id=[] icmp_block=[] timeout=0 ipset_entries=[] protocol=[] helper_module=[] destination=[] includes=[] firewalld_conf=None masquerade=None icmp_block_inversion=None target=None zone=None set_default_zone=None ipset=None ipset_type=None description=None short=None Jul 07 
20:14:26 managed-node2 python3.9[37451]: ansible-ansible.legacy.dnf Invoked with name=['python3-libselinux', 'python3-policycoreutils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:27 managed-node2 python3.9[37601]: ansible-ansible.legacy.dnf Invoked with name=['grubby'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:29 managed-node2 python3.9[37751]: ansible-ansible.legacy.dnf Invoked with name=['policycoreutils-python-utils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Jul 07 20:14:30 managed-node2 python3.9[37901]: ansible-setup Invoked with filter=['ansible_selinux'] gather_subset=['all'] gather_timeout=10 fact_path=/etc/ansible/facts.d Jul 07 20:14:32 managed-node2 python3.9[38089]: ansible-fedora.linux_system_roles.local_seport Invoked with ports=['15001-15003'] proto=tcp setype=http_port_t state=present local=False ignore_selinux_state=False reload=True Jul 07 20:14:33 managed-node2 python3.9[38238]: ansible-fedora.linux_system_roles.selinux_modules_facts Invoked Jul 07 20:14:37 managed-node2 python3.9[38387]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None Jul 07 20:14:38 managed-node2 python3.9[38537]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:38 managed-node2 python3.9[38688]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:39 managed-node2 python3.9[38838]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:40 managed-node2 python3.9[38988]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service 
/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:40 managed-node2 python3.9[39138]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl enable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None Jul 07 20:14:41 managed-node2 python3.9[39287]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1 state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:41 managed-node2 python3.9[39436]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1-create state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:42 managed-node2 sudo[39585]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dteqtauzgahpdwlqmxqoqvigwdlcbwgx ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933682.0866897-15498-43758369682888/AnsiballZ_podman_image.py' Jul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39588.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 114. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39596.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 118. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39604.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 122. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39611.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 126. Jul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39618.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 130. 
Jul 07 20:14:43 managed-node2 systemd[27808]: Started podman-39626.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 134. Jul 07 20:14:43 managed-node2 sudo[39585]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:43 managed-node2 python3.9[39782]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:43 managed-node2 python3.9[39933]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d state=directory owner=podman_basic_user group=3001 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:44 managed-node2 python3.9[40082]: ansible-ansible.legacy.stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:14:44 managed-node2 python3.9[40157]: ansible-ansible.legacy.file Invoked with owner=podman_basic_user group=3001 mode=0644 dest=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _original_basename=.3ieew216 recurse=False state=file path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:14:44 managed-node2 sudo[40306]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ustvjrkhcohlxhhxarkriiiprsdnnhal ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933684.6664407-15601-193248781752908/AnsiballZ_podman_play.py' Jul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:14:44 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:14:45 managed-node2 systemd[27808]: Started podman-40315.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 138. 
Jul 07 20:14:45 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 142. Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman play kube --start=true --log-level=debug /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time="2025-07-07T20:14:45-04:00" level=info msg="/bin/podman filtering at log level debug" time="2025-07-07T20:14:45-04:00" level=debug msg="Called kube.PersistentPreRunE(/bin/podman play kube --start=true --log-level=debug /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml)" time="2025-07-07T20:14:45-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" time="2025-07-07T20:14:45-04:00" level=info msg="Using sqlite as database backend" time="2025-07-07T20:14:45-04:00" level=debug msg="systemd-logind: Unknown object '/'." time="2025-07-07T20:14:45-04:00" level=debug msg="Using graph driver overlay" time="2025-07-07T20:14:45-04:00" level=debug msg="Using graph root /home/podman_basic_user/.local/share/containers/storage" time="2025-07-07T20:14:45-04:00" level=debug msg="Using run root /run/user/3001/containers" time="2025-07-07T20:14:45-04:00" level=debug msg="Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod" time="2025-07-07T20:14:45-04:00" level=debug msg="Using tmp dir /run/user/3001/libpod/tmp" time="2025-07-07T20:14:45-04:00" level=debug msg="Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes" time="2025-07-07T20:14:45-04:00" level=debug msg="Using transient store: false" time="2025-07-07T20:14:45-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that metacopy is not being used" time="2025-07-07T20:14:45-04:00" level=debug msg="Cached value indicated that native-diff is usable" time="2025-07-07T20:14:45-04:00" level=debug msg="backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false" time="2025-07-07T20:14:45-04:00" level=debug msg="Initializing event backend file" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" 
time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" time="2025-07-07T20:14:45-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" time="2025-07-07T20:14:45-04:00" level=info msg="Setting parallel job count to 7" time="2025-07-07T20:14:45-04:00" level=debug msg="Successfully loaded network podman-default-kube-network: &{podman-default-kube-network f726a0dfc720eef9b785c3acdef2ddc0ef169e999e9185270f7b5fdceae44256 bridge podman1 2025-07-07 20:13:16.261934543 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}" time="2025-07-07T20:14:45-04:00" level=debug msg="Successfully loaded 2 networks" time="2025-07-07T20:14:45-04:00" level=debug msg="Pod using bridge network mode" time="2025-07-07T20:14:45-04:00" level=debug msg="Created cgroup path user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice for parent user.slice and name libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c" time="2025-07-07T20:14:45-04:00" level=debug msg="Created cgroup user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice" time="2025-07-07T20:14:45-04:00" level=debug msg="Got pod cgroup as user.slice/user-3001.slice/user@3001.service/user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice" Error: adding pod to state: name "httpd1" is in use: pod already exists time="2025-07-07T20:14:45-04:00" level=debug msg="Shutting down engines" time="2025-07-07T20:14:45-04:00" level=info msg="Received shutdown.Stop(), terminating!" 
PID=40315 Jul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125 Jul 07 20:14:45 managed-node2 sudo[40306]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:14:46 managed-node2 python3.9[40471]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:14:46 managed-node2 python3.9[40621]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:47 managed-node2 python3.9[40772]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:49 managed-node2 python3.9[40922]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:49 managed-node2 python3.9[41071]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:50 managed-node2 podman[41251]: 2025-07-07 20:14:50.30172649 -0400 EDT m=+0.335741630 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:14:50 managed-node2 python3.9[41415]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:51 managed-node2 python3.9[41566]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:51 managed-node2 python3.9[41715]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:14:51 managed-node2 python3.9[41790]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd2.yml _original_basename=.7tnd0tsm recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd2.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play Invoked with 
state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:14:52 managed-node2 podman[41946]: 2025-07-07 20:14:52.281267633 -0400 EDT m=+0.019255481 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:14:52 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice. ░░ Subject: A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully. ░░ ░░ The job identifier is 1759. Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time="2025-07-07T20:14:52-04:00" level=info msg="/usr/bin/podman filtering at log level debug" time="2025-07-07T20:14:52-04:00" level=debug msg="Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)" time="2025-07-07T20:14:52-04:00" level=debug msg="Using conmon: \"/usr/bin/conmon\"" time="2025-07-07T20:14:52-04:00" level=info msg="Using sqlite as database backend" time="2025-07-07T20:14:52-04:00" level=debug msg="Using graph driver overlay" time="2025-07-07T20:14:52-04:00" level=debug msg="Using graph root /var/lib/containers/storage" time="2025-07-07T20:14:52-04:00" level=debug msg="Using run root /run/containers/storage" time="2025-07-07T20:14:52-04:00" level=debug msg="Using static dir /var/lib/containers/storage/libpod" time="2025-07-07T20:14:52-04:00" level=debug msg="Using tmp dir /run/libpod" time="2025-07-07T20:14:52-04:00" level=debug msg="Using volume path /var/lib/containers/storage/volumes" time="2025-07-07T20:14:52-04:00" level=debug msg="Using transient store: false" time="2025-07-07T20:14:52-04:00" level=debug msg="[graphdriver] trying provided driver \"overlay\"" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that overlay is supported" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that metacopy is being used" time="2025-07-07T20:14:52-04:00" level=debug msg="Cached value indicated that native-diff is not being used" time="2025-07-07T20:14:52-04:00" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" time="2025-07-07T20:14:52-04:00" level=debug msg="backingFs=xfs, 
projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true" time="2025-07-07T20:14:52-04:00" level=debug msg="Initializing event backend journald" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument" time="2025-07-07T20:14:52-04:00" level=debug msg="Using OCI runtime \"/usr/bin/crun\"" time="2025-07-07T20:14:52-04:00" level=info msg="Setting parallel job count to 7" time="2025-07-07T20:14:52-04:00" level=debug msg="Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}" time="2025-07-07T20:14:52-04:00" level=debug msg="Successfully loaded 2 networks" time="2025-07-07T20:14:52-04:00" level=debug msg="Pod using bridge network mode" time="2025-07-07T20:14:52-04:00" level=debug msg="Created cgroup path machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice for parent machine.slice and name libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a" time="2025-07-07T20:14:52-04:00" level=debug msg="Created cgroup machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice" time="2025-07-07T20:14:52-04:00" level=debug msg="Got pod cgroup as machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice" Error: adding pod to state: name "httpd2" is in use: pod already exists time="2025-07-07T20:14:52-04:00" level=debug msg="Shutting down engines" Jul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125 Jul 07 20:14:53 managed-node2 python3.9[42102]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:55 managed-node2 python3.9[42253]: ansible-ansible.legacy.command Invoked with 
_raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:14:56 managed-node2 python3.9[42403]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:56 managed-node2 python3.9[42552]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:57 managed-node2 podman[42732]: 2025-07-07 20:14:57.595089727 -0400 EDT m=+0.334374931 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:14:57 managed-node2 python3.9[42895]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:14:58 managed-node2 python3.9[43046]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:59 managed-node2 python3.9[43195]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:14:59 managed-node2 python3.9[43270]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd3.yml _original_basename=.fnfhf1h4 recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd3.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:14:59 managed-node2 python3.9[43419]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:14:59 managed-node2 podman[43426]: 2025-07-07 20:14:59.794215832 -0400 EDT m=+0.017981924 network create 
0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge) Jul 07 20:14:59 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice. ░░ Subject: A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully. ░░ ░░ The job identifier is 1763. Jul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:00 managed-node2 sudo[43582]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-copmprovbtozwjdqvrxslhkmftgtigcs ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933700.520565-16475-105270240946276/AnsiballZ_command.py' Jul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:00 managed-node2 python3.9[43584]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:00 managed-node2 systemd[27808]: Started podman-43592.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 146. 
Jul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:01 managed-node2 python3.9[43750]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:01 managed-node2 python3.9[43908]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:02 managed-node2 sudo[44065]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-omhmgepybavqbezpokrriumisrazocox ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933701.8765578-16521-15111276780862/AnsiballZ_command.py' Jul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:02 managed-node2 python3.9[44067]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:02 managed-node2 python3.9[44219]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:02 managed-node2 python3.9[44371]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:03 managed-node2 python3.9[44523]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:03 managed-node2 python3.9[44673]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True 
force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:04 managed-node2 python3.9[44823]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15003/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:07 managed-node2 python3.9[45122]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:08 managed-node2 python3.9[45277]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:11 managed-node2 python3.9[45428]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None Jul 07 20:15:12 managed-node2 python3.9[45578]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:12 managed-node2 python3.9[45729]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:12 managed-node2 python3.9[45879]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:14 managed-node2 python3.9[46029]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:14 managed-node2 python3.9[46179]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:15 managed-node2 sudo[46330]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yrlqohfxmrbjsiwrdleklbogdfrytzax ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 
/var/tmp/ansible-tmp-1751933715.029206-17136-162272482862195/AnsiballZ_systemd.py' Jul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:15 managed-node2 python3.9[46332]: ansible-systemd Invoked with name=podman-kube@-home-podman_basic_user-.config-containers-ansible\x2dkubernetes.d-httpd1.yml.service scope=user state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:15:15 managed-node2 systemd[27808]: Reloading. Jul 07 20:15:15 managed-node2 systemd[27808]: Stopping A template for running K8s workloads via podman-kube-play... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 150. Jul 07 20:15:25 managed-node2 podman[46347]: time="2025-07-07T20:15:25-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd1-httpd1 in 10 seconds, resorting to SIGKILL" Jul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:25 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 151 and the job result is done. Jul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory Jul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory Jul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory Jul 07 20:15:26 managed-node2 podman[46347]: Pods stopped: Jul 07 20:15:26 managed-node2 podman[46347]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4 Jul 07 20:15:26 managed-node2 podman[46347]: Pods removed: Jul 07 20:15:26 managed-node2 podman[46347]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4 Jul 07 20:15:26 managed-node2 podman[46347]: Secrets removed: Jul 07 20:15:26 managed-node2 podman[46347]: Volumes removed: Jul 07 20:15:26 managed-node2 systemd[27808]: Stopped A template for running K8s workloads via podman-kube-play. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 150 and the job result is done. 
Jul 07 20:15:26 managed-node2 sudo[46330]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:26 managed-node2 python3.9[46572]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:15:26 managed-node2 sudo[46723]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngscqobpoontmmgbeazhbfnhtlrerjma ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933726.6496177-17450-135897393375501/AnsiballZ_podman_play.py' Jul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play version: 5.5.1, kube file /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml Jul 07 20:15:27 managed-node2 systemd[27808]: Started podman-46732.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 152. 
Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman kube play --down /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped: Pods removed: Secrets removed: Volumes removed: Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: Jul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0 Jul 07 20:15:27 managed-node2 sudo[46723]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:15:27 managed-node2 python3.9[46888]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:28 managed-node2 python3.9[47037]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:15:29 managed-node2 python3.9[47187]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:31 managed-node2 python3.9[47338]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:31 managed-node2 python3.9[47488]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:15:31 managed-node2 systemd[1]: Reloading. Jul 07 20:15:31 managed-node2 systemd-rc-local-generator[47509]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:15:32 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play... ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has begun execution. ░░ ░░ The job identifier is 1768. Jul 07 20:15:32 managed-node2 podman[47527]: 2025-07-07 20:15:32.086748492 -0400 EDT m=+0.031435423 pod stop d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:15:42 managed-node2 podman[47527]: time="2025-07-07T20:15:42-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL" Jul 07 20:15:42 managed-node2 systemd[1]: libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.121044923 -0400 EDT m=+10.065732151 container died 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test) Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.165231828 -0400 EDT m=+10.109918731 container cleanup 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:15:42 managed-node2 systemd[1]: libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.176672676 -0400 EDT m=+10.121359827 container died 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left allmulticast mode Jul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left promiscuous mode Jul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state Jul 07 20:15:42 managed-node2 systemd[1]: run-netns-netns\x2d2e00ab52\x2d0e7b\x2d94ee\x2da345\x2dec17caccc43b.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d2e00ab52\x2d0e7b\x2d94ee\x2da345\x2dec17caccc43b.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.276854989 -0400 EDT m=+10.221541921 container cleanup 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice. ░░ Subject: A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished. ░░ ░░ The job identifier is 1770 and the job result is done. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.302687887 -0400 EDT m=+10.247374820 container remove 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry) Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.329086658 -0400 EDT m=+10.273773592 container remove 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 systemd[1]: machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: Failed to open /run/systemd/transient/machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: No such file or directory Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.337213217 -0400 EDT m=+10.281900117 pod remove d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2) Jul 07 20:15:42 managed-node2 systemd[1]: libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has successfully entered the 'dead' state. 
Jul 07 20:15:42 managed-node2 conmon[32226]: conmon 78627b3638a40af7f868 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope/container/memory.events Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.343750156 -0400 EDT m=+10.288437240 container kill 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.349945997 -0400 EDT m=+10.294633088 container died 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.409484257 -0400 EDT m=+10.354171379 container remove 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service) Jul 07 20:15:42 managed-node2 podman[47527]: Pods stopped: Jul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 Jul 07 20:15:42 managed-node2 podman[47527]: Pods removed: Jul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 Jul 07 20:15:42 managed-node2 podman[47527]: Secrets removed: Jul 07 20:15:42 managed-node2 podman[47527]: Volumes removed: Jul 07 20:15:42 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has successfully entered the 'dead' state. Jul 07 20:15:42 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play. ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service has finished. ░░ ░░ The job identifier is 1768 and the job result is done. Jul 07 20:15:42 managed-node2 python3.9[47729]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman kube play --down /etc/containers/ansible-kubernetes.d/httpd2.yml Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped: Pods removed: Secrets removed: Volumes removed: Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: Jul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0 Jul 07 20:15:43 managed-node2 python3.9[48043]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:15:45 managed-node2 python3.9[48192]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:46 managed-node2 python3.9[48343]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:15:47 managed-node2 python3.9[48493]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:15:47 managed-node2 systemd[1]: Reloading. Jul 07 20:15:47 managed-node2 systemd-rc-local-generator[48513]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:15:47 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play... ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has begun execution. ░░ ░░ The job identifier is 1771. 
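The httpd2 cleanup above follows a fixed per-file sequence: derive the templated unit name with systemd-escape, stop that unit, run podman kube play --down against the kube YAML, and finally delete the YAML. A rough manual equivalent built only from commands visible in this log (a sketch, not the role's actual task code):

# 1. Map the kube YAML path to its podman-kube@ instance name
unit=$(systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml)
# 2. Stop the templated service; this is what drives the pod/container teardown logged above
systemctl stop "$unit"
# 3. Remove anything 'podman kube play' created from that YAML (pod, containers, secrets, volumes)
podman kube play --down /etc/containers/ansible-kubernetes.d/httpd2.yml
# 4. Remove the YAML itself (the role does this with the file module and state=absent)
rm -f /etc/containers/ansible-kubernetes.d/httpd2.yml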
Jul 07 20:15:47 managed-node2 podman[48533]: 2025-07-07 20:15:47.405787867 -0400 EDT m=+0.031643471 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 podman[48533]: time="2025-07-07T20:15:57-04:00" level=warning msg="StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL" Jul 07 20:15:57 managed-node2 systemd[1]: libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.434010239 -0400 EDT m=+10.059866007 container died 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.478667416 -0400 EDT m=+10.104522986 container cleanup 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 systemd[1]: libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.500190016 -0400 EDT m=+10.126045733 container died e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: run-rce7152e4cf79441b86b3f3ed7d6f4283.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has successfully entered the 'dead' state. 
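The ten-second gap between the pod stop request at 20:15:47 and the SIGKILL at 20:15:57 is podman's default stop timeout: the test container does not exit on SIGTERM, so podman escalates to SIGKILL once the timeout expires. The timeout can be set per call, for example (illustrative only, not a command run by this test):

podman pod stop --time 5 httpd3   # -t/--time: seconds to wait after SIGTERM before sending SIGKILL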
Jul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left allmulticast mode Jul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left promiscuous mode Jul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state Jul 07 20:15:57 managed-node2 NetworkManager[644]: [1751933757.5423] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Jul 07 20:15:57 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1773. Jul 07 20:15:57 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1773. Jul 07 20:15:57 managed-node2 systemd[1]: run-netns-netns\x2db2b0269b\x2d6f52\x2d704b\x2de0f2\x2d936fd9832ebd.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2db2b0269b\x2d6f52\x2d704b\x2de0f2\x2d936fd9832ebd.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.72272296 -0400 EDT m=+10.348578562 container cleanup e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice. ░░ Subject: A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished. ░░ ░░ The job identifier is 1839 and the job result is done. 
Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.730067956 -0400 EDT m=+10.355923539 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.736214931 -0400 EDT m=+10.362070507 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.760284407 -0400 EDT m=+10.386140034 container remove 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.786340751 -0400 EDT m=+10.412196374 container remove e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.794425826 -0400 EDT m=+10.420281396 pod remove 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3) Jul 07 20:15:57 managed-node2 systemd[1]: libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has successfully entered the 'dead' state. 
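The repeated "Failed to open /run/systemd/transient/machine-libpod_pod_….slice: No such file or directory" messages appear to be systemd reacting to cleanup requests for a transient slice it has already removed (the matching "Removed slice" entry is logged just above); the pod removal still completes normally. A quick manual way to confirm nothing from httpd3 is left behind, using the same check the test itself runs later in this log plus a hypothetical container listing:

podman pod exists httpd3 && echo "httpd3 pod still present" || echo "httpd3 pod removed"
podman ps -a --filter name=httpd3   # list any leftover containers whose name matches httpd3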
Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.797955293 -0400 EDT m=+10.423811069 container kill eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.805138661 -0400 EDT m=+10.430994476 container died eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.86757777 -0400 EDT m=+10.493433378 container remove eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service) Jul 07 20:15:57 managed-node2 podman[48533]: Pods stopped: Jul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 Jul 07 20:15:57 managed-node2 podman[48533]: Pods removed: Jul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 Jul 07 20:15:57 managed-node2 podman[48533]: Secrets removed: Jul 07 20:15:57 managed-node2 podman[48533]: Volumes removed: Jul 07 20:15:57 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has successfully entered the 'dead' state. Jul 07 20:15:57 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play. ░░ Subject: A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service has finished. ░░ ░░ The job identifier is 1771 and the job result is done. Jul 07 20:15:58 managed-node2 python3.9[48770]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount has successfully entered the 'dead' state. Jul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play Invoked with state=absent kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Jul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd3.yml Jul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:15:59 managed-node2 python3.9[49083]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:00 managed-node2 python3.9[49232]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None Jul 07 20:16:00 managed-node2 python3.9[49382]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:01 managed-node2 sudo[49533]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eyaoewgeadnpajdfsqkdmnspilftsmzm ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.058141-18477-143961090014844/AnsiballZ_podman_container_info.py' Jul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:01 managed-node2 python3.9[49535]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None Jul 07 20:16:01 managed-node2 systemd[27808]: Started podman-49536.scope. 
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 156. Jul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:01 managed-node2 sudo[49691]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-csyhukllkdoqxtgiejqztpcafyureeyp ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.7158227-18498-173793406938955/AnsiballZ_command.py' Jul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:01 managed-node2 python3.9[49693]: ansible-ansible.legacy.command Invoked with _raw_params=podman network ls -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49694.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 160. Jul 07 20:16:02 managed-node2 sudo[49691]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:02 managed-node2 sudo[49850]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nugdaehmscgbqoulldhoxbffneulpeqi ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933762.1564903-18516-230631949404561/AnsiballZ_command.py' Jul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:02 managed-node2 python3.9[49852]: ansible-ansible.legacy.command Invoked with _raw_params=podman secret ls -n -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49853.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 164. Jul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:02 managed-node2 python3.9[50009]: ansible-ansible.legacy.command Invoked with removes=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl disable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None stdin=None Jul 07 20:16:02 managed-node2 systemd[1]: Stopping User Manager for UID 3001... ░░ Subject: A stop job for unit user@3001.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user@3001.service has begun execution. ░░ ░░ The job identifier is 1840. 
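The rootless checks above ("podman network ls -q" and "podman secret ls -n -q") are executed by becoming podman_basic_user with XDG_RUNTIME_DIR pointed at that user's runtime directory, so podman uses the user's own storage and systemd session instead of root's. Stripped of the Ansible wrapping, the pattern is roughly:

sudo -u podman_basic_user /bin/sh -c 'XDG_RUNTIME_DIR=/run/user/3001 podman network ls -q'
sudo -u podman_basic_user /bin/sh -c 'XDG_RUNTIME_DIR=/run/user/3001 podman secret ls -n -q'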
Jul 07 20:16:02 managed-node2 systemd[27808]: Activating special unit Exit the Session... Jul 07 20:16:02 managed-node2 systemd[27808]: Stopping podman-pause-7fbe17c5.scope... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 181. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /app/podman-kube. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 183 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 180 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Main User Target. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 174 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Basic System. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 187 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Paths. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 185 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Sockets. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 173 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Timers. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 188 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped Mark boot as successful after the user session has run 2 minutes. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 186 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped Daily Cleanup of User's Temporary Directories. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 172 and the job result is done. Jul 07 20:16:02 managed-node2 dbus-broker[28296]: Dispatched 2118 messages @ 3(±15)μs / message. ░░ Subject: Dispatched 2118 messages ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ This message is printed by dbus-broker when shutting down. 
It includes metric ░░ information collected during the runtime of dbus-broker. ░░ ░░ The message lists the number of dispatched messages ░░ (in this case 2118) as well as the mean time to ░░ handling a single message. The time measurements exclude the time spent on ░░ writing to and reading from the kernel. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopping D-Bus User Message Bus... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 171. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped Create User's Volatile Files and Directories. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 176 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped D-Bus User Message Bus. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 171 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Stopped podman-pause-7fbe17c5.scope. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 181 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /user. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 179 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Closed D-Bus User Message Bus Socket. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 175 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Removed slice User Application Slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 184 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[27808]: Reached target Shutdown. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 170. Jul 07 20:16:02 managed-node2 systemd[27808]: Finished Exit the Session. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 169. Jul 07 20:16:02 managed-node2 systemd[27808]: Reached target Exit the Session. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 168. Jul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user@3001.service has successfully entered the 'dead' state. Jul 07 20:16:02 managed-node2 systemd[1]: Stopped User Manager for UID 3001. ░░ Subject: A stop job for unit user@3001.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user@3001.service has finished. ░░ ░░ The job identifier is 1840 and the job result is done. Jul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Consumed 2.173s CPU time. ░░ Subject: Resources consumed by unit runtime ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user@3001.service completed and consumed the indicated resources. Jul 07 20:16:02 managed-node2 systemd[1]: Stopping User Runtime Directory /run/user/3001... ░░ Subject: A stop job for unit user-runtime-dir@3001.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user-runtime-dir@3001.service has begun execution. ░░ ░░ The job identifier is 1841. Jul 07 20:16:03 managed-node2 systemd[1]: run-user-3001.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-user-3001.mount has successfully entered the 'dead' state. Jul 07 20:16:03 managed-node2 systemd[1]: user-runtime-dir@3001.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user-runtime-dir@3001.service has successfully entered the 'dead' state. Jul 07 20:16:03 managed-node2 systemd[1]: Stopped User Runtime Directory /run/user/3001. ░░ Subject: A stop job for unit user-runtime-dir@3001.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user-runtime-dir@3001.service has finished. ░░ ░░ The job identifier is 1841 and the job result is done. Jul 07 20:16:03 managed-node2 systemd[1]: Removed slice User Slice of UID 3001. ░░ Subject: A stop job for unit user-3001.slice has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit user-3001.slice has finished. ░░ ░░ The job identifier is 1843 and the job result is done. Jul 07 20:16:03 managed-node2 systemd[1]: user-3001.slice: Consumed 2.196s CPU time. ░░ Subject: Resources consumed by unit runtime ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit user-3001.slice completed and consumed the indicated resources. 
Jul 07 20:16:03 managed-node2 python3.9[50161]: ansible-ansible.legacy.command Invoked with _raw_params=loginctl show-user --value -p State podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:04 managed-node2 sudo[50311]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dpicetofkbhrscpezuhafexsvxxmiwru ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933764.0345392-18589-160568711821160/AnsiballZ_command.py' Jul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:04 managed-node2 python3.9[50313]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd1 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:04 managed-node2 python3.9[50468]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:04 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:05 managed-node2 python3.9[50624]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd3 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:05 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
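The "loginctl disable-linger podman_basic_user" call at 20:16:02 is what triggers the shutdown sequence above: with linger off and no open session, logind stops user@3001.service, unmounts /run/user/3001 and removes user-3001.slice. The "loginctl show-user --value -p State podman_basic_user" call at 20:16:03 then re-reads the user's logind state, presumably so the test can wait for the teardown to settle. The two commands in isolation (sketch):

loginctl disable-linger podman_basic_user                # stop keeping user@3001.service alive without a session
loginctl show-user --value -p State podman_basic_user    # print only the State property for that user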
Jul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:16:05 managed-node2 sudo[50781]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ulqlnfrznxyujwhsbktkenenjnaaarpn ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933765.2955933-18640-73245982266512/AnsiballZ_command.py' Jul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0) Jul 07 20:16:05 managed-node2 python3.9[50783]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session closed for user podman_basic_user Jul 07 20:16:05 managed-node2 python3.9[50935]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:06 managed-node2 python3.9[51087]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active ' _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:06 managed-node2 python3.9[51239]: ansible-stat Invoked with path=/var/lib/systemd/linger/podman_basic_user follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:07 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. 
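The three grep checks above verify that no podman-kube@…httpd{1,2,3}.yml.service unit is still listed as "loaded active" after the teardown (the httpd1 check runs against the user manager, the httpd2/httpd3 checks against the system manager); under "set -euo pipefail" a grep with no match exits non-zero, which is presumably the signal the test evaluates. A standalone form of the httpd2 check (sketch):

systemctl --system list-units -a -l --plain \
  | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active ' \
  && echo "podman-kube unit for httpd2 is still active" \
  || echo "no active podman-kube unit for httpd2"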
Jul 07 20:16:09 managed-node2 python3.9[51537]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:10 managed-node2 python3.9[51692]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:16:10 managed-node2 python3.9[51842]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:14 managed-node2 python3.9[51993]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None Jul 07 20:16:14 managed-node2 python3.9[52143]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:15 managed-node2 python3.9[52294]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:15 managed-node2 python3.9[52444]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:17 managed-node2 python3.9[52594]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:17 managed-node2 python3.9[52744]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:17 managed-node2 python3.9[52893]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:18 managed-node2 python3.9[53042]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:19 managed-node2 python3.9[53191]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:16:20 managed-node2 python3.9[53341]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:21 managed-node2 python3.9[53492]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:21 
managed-node2 python3.9[53642]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:16:22 managed-node2 python3.9[53793]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:22 managed-node2 python3.9[53942]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:23 managed-node2 python3.9[54091]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:25 managed-node2 python3.9[54242]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:25 managed-node2 python3.9[54392]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Jul 07 20:16:26 managed-node2 python3.9[54543]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:26 managed-node2 python3.9[54692]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:27 managed-node2 python3.9[54841]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None Jul 07 20:16:28 managed-node2 python3.9[54991]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:29 managed-node2 python3.9[55140]: ansible-file Invoked with path=/etc/containers/storage.conf state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:30 managed-node2 python3.9[55289]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None 
group=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:33 managed-node2 python3.9[55487]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Jul 07 20:16:34 managed-node2 python3.9[55662]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:34 managed-node2 python3.9[55811]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:36 managed-node2 python3.9[56109]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:37 managed-node2 python3.9[56264]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Jul 07 20:16:37 managed-node2 python3.9[56414]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:40 managed-node2 python3.9[56565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:41 managed-node2 python3.9[56716]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:41 managed-node2 python3.9[56865]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:16:42 managed-node2 python3.9[56985]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933801.5835004-19965-109711770661066/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:42 managed-node2 python3.9[57134]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:16:42 managed-node2 systemd[1]: Reloading. Jul 07 20:16:43 managed-node2 systemd-rc-local-generator[57151]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:16:43 managed-node2 python3.9[57317]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:16:43 managed-node2 systemd[1]: Starting quadlet-pod-pod-pod.service... 
░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 1845. Jul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay-metacopy\x2dcheck327374229-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-metacopy\x2dcheck327374229-merged.mount has successfully entered the 'dead' state. Jul 07 20:16:43 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice. ░░ Subject: A start job for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully. ░░ ░░ The job identifier is 1916. Jul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.753251921 -0400 EDT m=+0.075359120 container create 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.760005549 -0400 EDT m=+0.082112720 pod create e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod) Jul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57321]: e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 Jul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8066] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/9) Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered disabled state Jul 07 20:16:43 managed-node2 kernel: veth0: entered allmulticast mode Jul 07 20:16:43 managed-node2 kernel: veth0: entered promiscuous mode Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state Jul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered forwarding state Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8206] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/10) Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8221] device (veth0): carrier: link connected Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8226] device (podman0): carrier: link connected Jul 07 20:16:43 managed-node2 systemd-udevd[57347]: Network interface NamePolicy= disabled on kernel command line. Jul 07 20:16:43 managed-node2 systemd-udevd[57348]: Network interface NamePolicy= disabled on kernel command line. 
Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8651] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8658] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8668] device (podman0): Activation: starting connection 'podman0' (0dc63386-fc14-4ac2-8cee-25b24d1739b5) Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8670] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8673] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8676] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8679] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1921. Jul 07 20:16:43 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1921. Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8930] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8932] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Jul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8938] device (podman0): Activation: successful, device activated. Jul 07 20:16:43 managed-node2 systemd[1]: Started libcrun container. ░░ Subject: A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully. ░░ ░░ The job identifier is 1987. 
Jul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.975146229 -0400 EDT m=+0.200627141 container init 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.981266459 -0400 EDT m=+0.206747252 container start 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service) Jul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.987430031 -0400 EDT m=+0.212910758 pod start e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod) Jul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57329]: quadlet-pod Jul 07 20:16:43 managed-node2 systemd[1]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished successfully. ░░ ░░ The job identifier is 1845. Jul 07 20:16:44 managed-node2 python3.9[57565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Jul 07 20:16:46 managed-node2 podman[57748]: 2025-07-07 20:16:46.651148299 -0400 EDT m=+0.387653928 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:16:47 managed-node2 python3.9[57912]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:47 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. 
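Note: quadlet-pod-pod-pod.service is generated from /etc/containers/systemd/quadlet-pod-pod.pod, which the role wrote in an earlier task; this log only records that file's checksum, not its contents. A minimal, purely illustrative sketch of such a .pod unit, assuming the test sets the pod name explicitly (the journal shows the pod as 'quadlet-pod'):

    # /etc/containers/systemd/quadlet-pod-pod.pod  -- illustrative sketch, not the test's template
    [Pod]
    PodName=quadlet-pod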
Jul 07 20:16:47 managed-node2 python3.9[58061]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:16:47 managed-node2 python3.9[58181]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933807.2449324-20069-121688553430320/.source.container dest=/etc/containers/systemd/quadlet-pod-container.container owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:48 managed-node2 python3.9[58330]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:16:48 managed-node2 systemd[1]: Reloading. Jul 07 20:16:48 managed-node2 systemd-rc-local-generator[58347]: /etc/rc.d/rc.local is not marked executable, skipping. Jul 07 20:16:48 managed-node2 python3.9[58513]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:16:49 managed-node2 systemd[1]: Starting quadlet-pod-container.service... ░░ Subject: A start job for unit quadlet-pod-container.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-container.service has begun execution. ░░ ░░ The job identifier is 1992. Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.074860568 -0400 EDT m=+0.046591683 container create b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z) Jul 07 20:16:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-volatile\x2dcheck976746358-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-volatile\x2dcheck976746358-merged.mount has successfully entered the 'dead' state. Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.117361038 -0400 EDT m=+0.089092248 container init b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service) Jul 07 20:16:49 managed-node2 systemd[1]: Started quadlet-pod-container.service. ░░ Subject: A start job for unit quadlet-pod-container.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-container.service has finished successfully. ░░ ░░ The job identifier is 1992. 
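Note: quadlet-pod-container.container, deployed above, is the companion unit that places the container inside the pod; again only its checksum is logged. Based on the image and names recorded in the surrounding journal entries, a plausible sketch (not the verbatim template) would be:

    # /etc/containers/systemd/quadlet-pod-container.container  -- illustrative sketch
    [Container]
    ContainerName=quadlet-pod-container
    Image=quay.io/libpod/testimage:20210610
    Pod=quadlet-pod-pod.pod

After the daemon-reload above, Quadlet turns this file into quadlet-pod-container.service, which is the unit the ansible-systemd task then starts.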
Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.122201448 -0400 EDT m=+0.093932749 container start b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0) Jul 07 20:16:49 managed-node2 quadlet-pod-container[58517]: b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e Jul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.05249592 -0400 EDT m=+0.024227291 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Jul 07 20:16:49 managed-node2 python3.9[58679]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:49 managed-node2 python3.9[58829]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:50 managed-node2 python3.9[58979]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}} {{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:50 managed-node2 python3.9[59137]: ansible-user Invoked with name=user_quadlet_pod uid=2223 state=present non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on managed-node2 update_password=always group=None groups=None comment=None home=None shell=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None Jul 07 20:16:50 managed-node2 useradd[59139]: new group: name=user_quadlet_pod, GID=2223 Jul 07 20:16:50 managed-node2 useradd[59139]: new user: name=user_quadlet_pod, UID=2223, GID=2223, home=/home/user_quadlet_pod, shell=/bin/bash, from=/dev/pts/0 Jul 07 20:16:50 managed-node2 rsyslogd[812]: imjournal: journal files changed, reloading... 
[v8.2412.0-2.el9 try https://www.rsyslog.com/e/0 ] Jul 07 20:16:52 managed-node2 python3.9[59444]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:53 managed-node2 python3.9[59599]: ansible-getent Invoked with database=passwd key=user_quadlet_pod fail_key=False service=None split=None Jul 07 20:16:53 managed-node2 python3.9[59749]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:53 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Jul 07 20:16:54 managed-node2 python3.9[59900]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:54 managed-node2 python3.9[60050]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:56 managed-node2 python3.9[60200]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:16:57 managed-node2 python3.9[60351]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:57 managed-node2 python3.9[60501]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:16:58 managed-node2 python3.9[60651]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None Jul 07 20:16:58 managed-node2 systemd[1]: Created slice User Slice of UID 2223. ░░ Subject: A start job for unit user-2223.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-2223.slice has finished successfully. ░░ ░░ The job identifier is 2130. Jul 07 20:16:58 managed-node2 systemd[1]: Starting User Runtime Directory /run/user/2223... ░░ Subject: A start job for unit user-runtime-dir@2223.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@2223.service has begun execution. ░░ ░░ The job identifier is 2065. Jul 07 20:16:58 managed-node2 systemd[1]: Finished User Runtime Directory /run/user/2223. 
░░ Subject: A start job for unit user-runtime-dir@2223.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@2223.service has finished successfully. ░░ ░░ The job identifier is 2065. Jul 07 20:16:58 managed-node2 systemd[1]: Starting User Manager for UID 2223... ░░ Subject: A start job for unit user@2223.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@2223.service has begun execution. ░░ ░░ The job identifier is 2064. Jul 07 20:16:58 managed-node2 systemd[60658]: pam_unix(systemd-user:session): session opened for user user_quadlet_pod(uid=2223) by user_quadlet_pod(uid=0) Jul 07 20:16:58 managed-node2 systemd[60658]: Queued start job for default target Main User Target. Jul 07 20:16:58 managed-node2 systemd[60658]: Created slice User Application Slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 4. Jul 07 20:16:58 managed-node2 systemd[60658]: Started Mark boot as successful after the user session has run 2 minutes. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 8. Jul 07 20:16:58 managed-node2 systemd[60658]: Started Daily Cleanup of User's Temporary Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 9. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Paths. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 12. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Timers. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 7. Jul 07 20:16:58 managed-node2 systemd[60658]: Starting D-Bus User Message Bus Socket... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 11. Jul 07 20:16:58 managed-node2 systemd[60658]: Starting Create User's Volatile Files and Directories... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 3. Jul 07 20:16:58 managed-node2 systemd[60658]: Listening on D-Bus User Message Bus Socket. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 11. Jul 07 20:16:58 managed-node2 systemd[60658]: Finished Create User's Volatile Files and Directories. 
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 3. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Sockets. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 10. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Basic System. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 2. Jul 07 20:16:58 managed-node2 systemd[60658]: Reached target Main User Target. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 1. Jul 07 20:16:58 managed-node2 systemd[60658]: Startup finished in 65ms. ░░ Subject: User manager start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The user manager instance for user 2223 has been started. All services queued ░░ for starting have been started. Note that other services might still be starting ░░ up or be started at any later time. ░░ ░░ Startup of the manager took 65603 microseconds. Jul 07 20:16:58 managed-node2 systemd[1]: Started User Manager for UID 2223. ░░ Subject: A start job for unit user@2223.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@2223.service has finished successfully. ░░ ░░ The job identifier is 2064. 
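Note: the user manager for UID 2223 comes up because lingering was enabled for user_quadlet_pod; without it the user-scope quadlet units would only run while a session is open. Manual equivalents of what the role just did (sketch only):

    loginctl enable-linger user_quadlet_pod          # what the 'creates=' guarded command ran
    ls /var/lib/systemd/linger/                      # the flag file the role checks for
    loginctl show-user user_quadlet_pod -p Linger    # should report Linger=yes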
Jul 07 20:16:59 managed-node2 python3.9[60817]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:16:59 managed-node2 python3.9[60966]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:17:00 managed-node2 python3.9[61086]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933819.4453251-20404-152886448732131/.source.pod dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:00 managed-node2 sudo[61235]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qycimnmrylvnpkxuzbdovgpddpoemvav ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.1430469-20434-39126838474860/AnsiballZ_systemd.py' Jul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd [WARNING] Module remote_tmp /home/user_quadlet_pod/.ansible/tmp did not exist and was created with a mode of 0700, this may cause issues when running as another user. To avoid this, create the remote_tmp dir with the correct permissions manually Jul 07 20:17:00 managed-node2 systemd[60658]: Reloading. Jul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:00 managed-node2 sudo[61397]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nnnqmwerpaczbueaimzsjafnuajzqvhs ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.7074661-20450-27497593035399/AnsiballZ_systemd.py' Jul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:01 managed-node2 python3.9[61399]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:17:01 managed-node2 systemd[60658]: Starting Wait for system level network-online.target as user.... 
░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 25. Jul 07 20:17:01 managed-node2 sh[61403]: active Jul 07 20:17:01 managed-node2 systemd[60658]: Finished Wait for system level network-online.target as user.. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 25. Jul 07 20:17:01 managed-node2 systemd[60658]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 13. Jul 07 20:17:01 managed-node2 systemd[60658]: Starting D-Bus User Message Bus... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 26. Jul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored Jul 07 20:17:01 managed-node2 systemd[60658]: Started D-Bus User Message Bus. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 26. Jul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored Jul 07 20:17:01 managed-node2 dbus-broker-lau[61428]: Ready Jul 07 20:17:01 managed-node2 systemd[60658]: Created slice Slice /user. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 32. Jul 07 20:17:01 managed-node2 systemd[60658]: Created slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 31. Jul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61411]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658 Jul 07 20:17:01 managed-node2 systemd[60658]: podman-pause-d252ab55.scope: unit configures an IP firewall, but not running as root. Jul 07 20:17:01 managed-node2 systemd[60658]: (This warning is only shown for the first unit using IP firewalling.) Jul 07 20:17:01 managed-node2 systemd[60658]: Started podman-pause-d252ab55.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 35. Jul 07 20:17:01 managed-node2 systemd[60658]: Started libcrun container. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 39. 
Jul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61431]: quadlet-pod Jul 07 20:17:01 managed-node2 systemd[60658]: Started quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 13. Jul 07 20:17:01 managed-node2 sudo[61397]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:02 managed-node2 python3.9[61604]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:02 managed-node2 python3.9[61755]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:02 managed-node2 python3.9[61905]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:04 managed-node2 python3.9[62055]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None Jul 07 20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:04 managed-node2 sudo[62204]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-clqautntenvhsnblukvtrjsozolrrutk ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933824.2704933-20565-153115325770627/AnsiballZ_podman_image.py' Jul 07 20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62207.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 44. Jul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62215.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 48. Jul 07 20:17:05 managed-node2 systemd[60658]: Started podman-62241.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 52. 
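Note: the getsubids calls above verify that the rootless user has subordinate UID/GID ranges, which rootless podman needs for its user namespaces. Manual equivalents (sketch; the ranges are normally defined in /etc/subuid and /etc/subgid):

    getsubids user_quadlet_pod                       # subordinate UID range for the user
    getsubids -g user_quadlet_pod                    # subordinate GID range
    grep user_quadlet_pod /etc/subuid /etc/subgid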
Jul 07 20:17:05 managed-node2 sudo[62204]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:05 managed-node2 python3.9[62397]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:17:06 managed-node2 python3.9[62546]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Jul 07 20:17:06 managed-node2 python3.9[62666]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933826.0010831-20615-171147992892008/.source.container dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Jul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:06 managed-node2 sudo[62815]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvdbdvervsgjztpuoyolguwsbeqeprfj ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933826.7366147-20636-253031789963643/AnsiballZ_systemd.py' Jul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:07 managed-node2 python3.9[62817]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Jul 07 20:17:07 managed-node2 systemd[60658]: Reloading. Jul 07 20:17:07 managed-node2 sudo[62815]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:07 managed-node2 sudo[62977]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uowxubkhbahtejstbxtpvjntqjsyenah ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933827.3300688-20652-5834244785399/AnsiballZ_systemd.py' Jul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:07 managed-node2 python3.9[62979]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Jul 07 20:17:07 managed-node2 systemd[60658]: Starting quadlet-pod-container.service... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 56. Jul 07 20:17:07 managed-node2 systemd[60658]: Started quadlet-pod-container.service. 
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 56. Jul 07 20:17:07 managed-node2 quadlet-pod-container[62982]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6 Jul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:08 managed-node2 python3.9[63145]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:08 managed-node2 python3.9[63295]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:09 managed-node2 sudo[63445]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-smtrjtflbvxbkabzbxqvalutkdqpmdkb ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933828.9926739-20712-141410485382073/AnsiballZ_command.py' Jul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:09 managed-node2 python3.9[63447]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}} {{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:09 managed-node2 systemd[60658]: Started podman-63448.scope. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 70. 
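Note: the pod inspect command above is the test's membership check; the Go template prints only the names of the containers attached to the pod. For a healthy pod the output would look something like 'quadlet-pod-infra quadlet-pod-container' (names taken from the root-scope journal entries earlier; the user-scope names are assumed to match):

    podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}} {{end}}'
    podman pod ps    # quick overview of pods and how many containers they hold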
Jul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:09 managed-node2 python3.9[63604]: ansible-stat Invoked with path=/var/lib/systemd/linger/user_quadlet_pod follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:11 managed-node2 python3.9[63904]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:12 managed-node2 python3.9[64059]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:13 managed-node2 python3.9[64210]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:13 managed-node2 python3.9[64360]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:15 managed-node2 python3.9[64510]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:16 managed-node2 python3.9[64661]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:16 managed-node2 python3.9[64811]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:17 managed-node2 python3.9[64961]: ansible-stat Invoked with path=/run/user/2223 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:18 managed-node2 sudo[65112]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mdrwdexxoegkvbfvoaccgnpcnrcnmlbz ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933838.0379803-20995-129290334053056/AnsiballZ_systemd.py' Jul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:18 managed-node2 python3.9[65114]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None Jul 07 20:17:18 managed-node2 systemd[60658]: Reloading. Jul 07 20:17:18 managed-node2 systemd[60658]: Stopping quadlet-pod-container.service... ░░ Subject: A stop job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has begun execution. ░░ ░░ The job identifier is 74. 
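Note: cleanup begins here; the ansible-systemd call above stops and disables the user-scope container unit through the same sudo channel, with XDG_RUNTIME_DIR pointed at the user's runtime directory so that systemctl --user reaches the right manager. A shell equivalent of that call (sketch):

    sudo -u user_quadlet_pod /bin/sh -c \
      'XDG_RUNTIME_DIR=/run/user/2223 systemctl --user stop quadlet-pod-container.service'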
Jul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: time="2025-07-07T20:17:28-04:00" level=warning msg="StopSignal SIGTERM failed to stop container quadlet-pod-container in 10 seconds, resorting to SIGKILL" Jul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6 Jul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Main process exited, code=exited, status=137/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStart= process belonging to unit UNIT has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 137. Jul 07 20:17:28 managed-node2 systemd[60658]: Removed slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 75 and the job result is done. Jul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory Jul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit UNIT has entered the 'failed' state with result 'exit-code'. Jul 07 20:17:28 managed-node2 systemd[60658]: Stopped quadlet-pod-container.service. ░░ Subject: A stop job for unit UNIT has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit UNIT has finished. ░░ ░░ The job identifier is 74 and the job result is done. 
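Note: status 137 above is 128+9, i.e. the container's main process ignored SIGTERM and was killed with SIGKILL once podman's default 10-second stop timeout expired, which is why the user manager records the service as failed with result 'exit-code' even though the stop completed. Manual equivalent, plus a possible tuning knob (unverified for this Quadlet version):

    podman stop -t 10 quadlet-pod-container    # manual stop with the same default 10 s grace period
    # A shorter grace period could be requested in the .container file with
    # StopTimeout=2, assuming the key is supported by the installed Quadlet.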
Jul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory Jul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65162]: quadlet-pod Jul 07 20:17:28 managed-node2 sudo[65112]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory Jul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65176]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658 Jul 07 20:17:29 managed-node2 python3.9[65334]: ansible-stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:30 managed-node2 python3.9[65634]: ansible-ansible.legacy.command Invoked with _raw_params=set -x set -o pipefail exec 1>&2 #podman volume rm --all #podman network prune -f podman volume ls podman network ls podman secret ls podman container ls podman pod ls podman images systemctl list-units | grep quadlet systemctl list-unit-files | grep quadlet ls -alrtF /etc/containers/systemd /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:30 managed-node2 python3.9[65839]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:31 managed-node2 python3.9[65989]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:33 managed-node2 python3.9[66288]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:34 managed-node2 python3.9[66443]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:34 managed-node2 python3.9[66594]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:34 managed-node2 python3.9[66744]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:36 managed-node2 python3.9[66894]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True 
get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:37 managed-node2 python3.9[67045]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:37 managed-node2 python3.9[67195]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Jul 07 20:17:38 managed-node2 python3.9[67345]: ansible-stat Invoked with path=/run/user/2223 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:38 managed-node2 sudo[67496]: pam_unix(sudo:account): password for user root will expire in 0 days Jul 07 20:17:38 managed-node2 sudo[67496]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rlcbsoretjgzvpwkphyhytxtddgruzuo ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933858.505194-21528-62534825907089/AnsiballZ_systemd.py' Jul 07 20:17:38 managed-node2 sudo[67496]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0) Jul 07 20:17:38 managed-node2 python3.9[67498]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None Jul 07 20:17:38 managed-node2 systemd[60658]: Reloading. Jul 07 20:17:38 managed-node2 sudo[67496]: pam_unix(sudo:session): session closed for user user_quadlet_pod Jul 07 20:17:39 managed-node2 python3.9[67660]: ansible-stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Jul 07 20:17:40 managed-node2 python3.9[67960]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None PLAY RECAP ********************************************************************* managed-node2 : ok=207 changed=14 unreachable=0 failed=2 skipped=237 rescued=2 ignored=0 SYSTEM ROLES ERRORS BEGIN v1 [ { "ansible_version": "2.17.12", "end_time": "2025-07-08T00:17:29.763521+00:00Z", "host": "managed-node2", "message": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "start_time": "2025-07-08T00:17:29.744633+00:00Z", "task_name": "Parse quadlet file", "task_path": "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml:12" }, { "ansible_version": "2.17.12", "delta": "0:00:00.039678", "end_time": "2025-07-07 20:17:31.360118", "host": "managed-node2", "message": "", "rc": 0, "start_time": "2025-07-07 20:17:31.320440", "stdout": "Jul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Initializing event backend file\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" 
level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Setting parallel job count to 7\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Called cleanup.PersistentPostRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only 19edcb90781cf21e1139d2477490fcc52d381e9fa210ccb4a2abe674b2dc35a8)\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29671]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=29671\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: Received SIGHUP\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: Successfully parsed config\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v4 ip {}\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v6 ip {}\nJul 07 20:13:28 managed-node2 
aardvark-dns[29155]: No configuration found stopping the sever\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Called cleanup.PersistentPreRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Setting custom database backend: \\\"sqlite\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Using sqlite as database backend\"\nJul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"systemd-logind: Unknown object '/'.\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using graph driver overlay\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using graph root /home/podman_basic_user/.local/share/containers/storage\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using run root /run/user/3001/containers\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using tmp dir /run/user/3001/libpod/tmp\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using transient store: false\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Cached value indicated that metacopy is not being used\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" 
level=debug msg=\"Cached value indicated that native-diff is usable\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Initializing event backend file\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Setting parallel job count to 7\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Called cleanup.PersistentPostRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Shutting down 
engines\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=29684\nJul 07 20:13:28 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 83 and the job result is done.\nJul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory\nJul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory\nJul 07 20:13:29 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory\nJul 07 20:13:29 managed-node2 podman[29660]: Pods stopped:\nJul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f\nJul 07 20:13:29 managed-node2 podman[29660]: Pods removed:\nJul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f\nJul 07 20:13:29 managed-node2 podman[29660]: Secrets removed:\nJul 07 20:13:29 managed-node2 podman[29660]: Volumes removed:\nJul 07 20:13:29 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 84.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 88.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started rootless-netns-281f12f7.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 92.\nJul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:29 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:13:29 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:13:29 managed-node2 
kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state\nJul 07 20:13:29 managed-node2 systemd[27808]: Started /usr/libexec/podman/aardvark-dns --config /run/user/3001/containers/networks/aardvark-dns -p 53 run.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 96.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 100.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 105.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 70.\nJul 07 20:13:29 managed-node2 podman[29660]: Pod:\nJul 07 20:13:29 managed-node2 podman[29660]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4\nJul 07 20:13:29 managed-node2 podman[29660]: Container:\nJul 07 20:13:29 managed-node2 podman[29660]: 98a702eb9e86b0efc7d3e6878bf2b4db5ac6ff3d0bc5383014d2958ce12dced5\nJul 07 20:13:29 managed-node2 sudo[29655]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:13:30 managed-node2 python3.9[29983]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:13:30 managed-node2 python3.9[30133]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:32 managed-node2 python3.9[30284]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:13:33 managed-node2 python3.9[30434]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:33 managed-node2 python3.9[30583]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True 
modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:34 managed-node2 podman[30766]: 2025-07-07 20:13:34.583080898 -0400 EDT m=+0.387686363 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:35 managed-node2 python3.9[30929]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:35 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:35 managed-node2 python3.9[31078]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:35 managed-node2 python3.9[31227]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:13:36 managed-node2 python3.9[31347]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd2.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933615.542873-13335-149986577229905/.source.yml _original_basename=.op5axps4 follow=False checksum=ce164467a3a112a82832f62e0fdfcaf3c7eecdd1 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:36 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None 
quadlet_file_mode=None quadlet_options=None\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.614627171 -0400 EDT m=+0.024918635 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:13:36 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1383.\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.657483611 -0400 EDT m=+0.067774829 container create b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.663455669 -0400 EDT m=+0.073746755 pod create a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.690624127 -0400 EDT m=+0.100915321 container create f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, app=test, io.containers.autoupdate=registry, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7101] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/3)\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:36 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.667763108 -0400 EDT m=+0.078054303 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:36 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7230] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4)\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7287] device (veth0): carrier: link connected\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7289] device (podman1): carrier: link connected\nJul 07 20:13:36 managed-node2 systemd-udevd[31526]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:36 managed-node2 systemd-udevd[31524]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7661] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: 
[1751933616.7667] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7674] device (podman1): Activation: starting connection 'podman1' (eac731d7-3726-4468-a790-cf1c7402dd92)\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7676] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7680] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7682] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7686] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1388.\nJul 07 20:13:36 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1388.\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7916] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7918] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7923] device (podman1): Activation: successful, device activated.\nJul 07 20:13:36 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run.\n\u2591\u2591 Subject: A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1454.\nJul 07 20:13:36 managed-node2 aardvark-dns[31613]: starting aardvark on a child with pid 31614\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Successfully parsed config\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v4 ip {\"podman-default-kube-network\": [10.89.0.1]}\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v6 ip {}\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Using the following upstream servers: [10.29.169.13:53, 10.29.170.12:53, 10.2.32.1:53]\nJul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope.\n\u2591\u2591 Subject: A start job for unit 
libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1458.\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/12/attach}\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : terminal_ctrl_fd: 12\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : winsz read side: 16, winsz write side: 17\nJul 07 20:13:36 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1463.\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : container PID: 31620\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.960118446 -0400 EDT m=+0.370409669 container init b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.963833697 -0400 EDT m=+0.374124951 container start b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope.\n\u2591\u2591 Subject: A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1468.\nJul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/11/attach}\nJul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : terminal_ctrl_fd: 11\nJul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : winsz read side: 15, winsz write side: 16\nJul 07 20:13:37 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1473.\nJul 07 20:13:37 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container PID: 31625\nJul 07 20:13:37 
managed-node2 podman[31503]: 2025-07-07 20:13:37.017137848 -0400 EDT m=+0.427429053 container init f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.020217845 -0400 EDT m=+0.430509047 container start f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.026166839 -0400 EDT m=+0.436457992 pod start a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pod:\n a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\n Container:\n f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\n \nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"/usr/bin/podman filtering at log level debug\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Using sqlite as database backend\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using graph driver overlay\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using transient store: false\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has 
CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Initializing event backend journald\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Setting parallel job count to 7\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Successfully loaded 2 networks\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pod using bridge network mode\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"no command or entrypoint provided, and no CMD or ENTRYPOINT from image: defaulting to empty string\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"using systemd mode: false\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"setting container name 
a89535868ec0-infra\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Loading seccomp profile from \\\"/usr/share/containers/seccomp.json\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Allocated lock 1 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that idmapped mounts for overlay are supported\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created container \\\"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\\\" has work directory \\\"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\\\" has run directory \\\"/run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Looking up image \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Normalized platform linux/amd64 to {amd64 linux [] }\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Trying \\\"quay.io/libpod/testimage:20210610\\\" ...\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"parsed reference into \\\"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pulling image quay.io/libpod/testimage:20210610 (policy: missing)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Looking up image \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Normalized platform linux/amd64 to {amd64 linux [] }\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Trying \\\"quay.io/libpod/testimage:20210610\\\" ...\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"parsed reference into \\\"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers 
storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Looking up image \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Normalized platform linux/amd64 to {amd64 linux [] }\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Trying \\\"quay.io/libpod/testimage:20210610\\\" ...\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"parsed reference into \\\"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"using systemd mode: false\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"adding container to pod httpd2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"setting container name httpd2-httpd2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Loading seccomp profile from \\\"/usr/share/containers/seccomp.json\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Sysctl net.ipv4.ping_group_range=0 0 ignored in containers.conf, since Network Namespace set to host\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /proc\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug 
msg=\"Adding mount /dev\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /dev/pts\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /dev/mqueue\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /sys\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /sys/fs/cgroup\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Allocated lock 2 for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\" has work directory \\\"/var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\" has run directory \\\"/run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Strongconnecting node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pushed b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 onto stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Finishing node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2. Popped b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 off stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Strongconnecting node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pushed f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 onto stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Finishing node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06. 
Popped f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 off stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Made network namespace at /run/netns/netns-3dcd885d-1b51-2e38-72ff-33596f02c329 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n [DEBUG netavark::network::validation] Validating network namespace...\n [DEBUG netavark::commands::setup] Setting up...\n [INFO netavark::firewall] Using iptables firewall driver\n [DEBUG netavark::network::bridge] Setup network podman-default-kube-network\n [DEBUG netavark::network::bridge] Container interface name: eth0 with IP addresses [10.89.0.2/24]\n [DEBUG netavark::network::bridge] Bridge name: podman1 with IP addresses [10.89.0.1/24]\n [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.ip_forward to 1\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/podman1/rp_filter to 2\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created root filesystem for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge\"\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv6/conf/eth0/autoconf to 0\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/arp_notify to 1\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/rp_filter to 2\n [INFO netavark::network::netlink] Adding route (dest: 0.0.0.0/0 ,gw: 10.89.0.1, metric 100)\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-4B9D9135B29BA created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_2 created on table filter\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_3 created on table filter\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_INPUT created on table filter\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_FORWARD created on table filter\n [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -j ACCEPT created on table nat and chain NETAVARK-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule ! 
-d 224.0.0.0/4 -j MASQUERADE created on table nat and chain NETAVARK-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j NETAVARK-4B9D9135B29BA created on table nat and chain POSTROUTING\n [DEBUG netavark::firewall::varktables::helpers] rule -p udp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT\n [DEBUG netavark::firewall::varktables::helpers] rule -p tcp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT\n [DEBUG netavark::firewall::varktables::helpers] rule -m conntrack --ctstate INVALID -j DROP created on table filter and chain NETAVARK_FORWARD\n [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT created on table filter and chain NETAVARK_FORWARD\n [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j ACCEPT created on table filter and chain NETAVARK_FORWARD\n [DEBUG netavark::firewall::firewalld] Adding firewalld rules for network 10.89.0.0/24\n [DEBUG netavark::firewall::firewalld] Adding subnet 10.89.0.0/24 to zone trusted as source\n [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.conf.podman1.route_localnet to 1\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-SETMARK created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-MASQ created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-DN-4B9D9135B29BA created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-DNAT created on table nat\n [DEBUG netavark::firewall::varktables::helpers] rule -j MARK --set-xmark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-SETMARK\n [DEBUG netavark::firewall::varktables::helpers] rule -j MASQUERADE -m comment --comment 'netavark portfw masq mark' -m mark --mark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-MASQ\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 10.89.0.0/24 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 127.0.0.1 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -j DNAT -p tcp --to-destination 10.89.0.2:80 --destination-port 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-DN-4B9D9135B29BA -p tcp --dport 15002 -m comment --comment 'dnat name: podman-default-kube-network id: b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2' created on table nat and chain NETAVARK-HOSTPORT-DNAT\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain PREROUTING\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain OUTPUT\n [DEBUG netavark::dns::aardvark] Spawning aardvark server\n [DEBUG netavark::dns::aardvark] start aardvark-dns: [\"systemd-run\", \"-q\", \"--scope\", \"/usr/libexec/podman/aardvark-dns\", \"--config\", \"/run/containers/networks/aardvark-dns\", \"-p\", \"53\", \"run\"]\n [DEBUG netavark::commands::setup] {\n \"podman-default-kube-network\": StatusBlock {\n dns_search_domains: Some(\n [\n \"dns.podman\",\n ],\n 
),\n dns_server_ips: Some(\n [\n 10.89.0.1,\n ],\n ),\n interfaces: Some(\n {\n \"eth0\": NetInterface {\n mac_address: \"ce:5c:7c:33:0d:65\",\n subnets: Some(\n [\n NetAddress {\n gateway: Some(\n 10.89.0.1,\n ),\n ipnet: 10.89.0.2/24,\n },\n ],\n ),\n },\n },\n ),\n },\n }\n [DEBUG netavark::commands::setup] Setup complete\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Setting Cgroups for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"reading hooks from /usr/share/containers/oci/hooks.d\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Workdir \\\"/\\\" resolved to host path \\\"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created OCI spec for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/config.json\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/usr/bin/conmon messages will be logged to syslog\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"running conmon: /usr/bin/conmon\" args=\"[--api-version 1 -c b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -u b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata -p /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/pidfile -n a89535868ec0-infra --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath 
--exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2]\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Received: 31620\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Got Conmon PID as 31618\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 in OCI runtime\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding nameserver(s) from network status of '[\\\"10.89.0.1\\\"]'\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding search domain(s) from network status of '[\\\"dns.podman\\\"]'\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Starting container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 with command [/catatonit -P]\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Started container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"overlay: mount_data=lowerdir=/var/lib/containers/storage/overlay/l/BSXLDW6S4QQFLDJH6Z45ODLX6A,upperdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/diff,workdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/work,nodev,metacopy=on,context=\\\"system_u:object_r:container_file_t:s0:c263,c753\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Mounted container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\" at \\\"/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created root filesystem for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Setting Cgroups for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"reading hooks from /usr/share/containers/oci/hooks.d\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Workdir \\\"/var/www\\\" resolved to a volume or mount\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created OCI spec for container 
f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/config.json\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/usr/bin/conmon messages will be logged to syslog\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"running conmon: /usr/bin/conmon\" args=\"[--api-version 1 -c f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -u f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata -p /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/pidfile -n httpd2-httpd2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06]\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Received: 31625\"\n time=\"2025-07-07T20:13:37-04:00\" level=info msg=\"Got Conmon PID as 31623\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Created container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 in OCI runtime\"\n 
time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Starting container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 with command [/bin/busybox-extras httpd -f -p 80]\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Started container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Called kube.PersistentPostRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0\nJul 07 20:13:37 managed-node2 python3.9[31775]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:13:37 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:37 managed-node2 systemd-rc-local-generator[31792]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:38 managed-node2 python3.9[31958]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None\nJul 07 20:13:38 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:38 managed-node2 systemd-rc-local-generator[31978]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:39 managed-node2 python3.9[32143]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:13:39 managed-node2 systemd[1]: Created slice Slice /system/podman-kube.\n\u2591\u2591 Subject: A start job for unit system-podman\\x2dkube.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit system-podman\\x2dkube.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1479.\nJul 07 20:13:39 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1478.\nJul 07 20:13:39 managed-node2 podman[32147]: 2025-07-07 20:13:39.145431853 -0400 EDT m=+0.025527029 pod stop a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:46 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: time=\"2025-07-07T20:13:49-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:13:49 
managed-node2 systemd[1]: libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container 31625 exited with status 137\nJul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice/libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope/container/memory.events\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.174232474 -0400 EDT m=+10.054327720 container died f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Setting custom database backend: \\\"sqlite\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Using sqlite as database backend\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph driver overlay\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using transient store: false\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"[graphdriver] trying provided driver 
\\\"overlay\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Initializing event backend journald\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: 
time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Setting parallel job count to 7\"\nJul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.219658211 -0400 EDT m=+10.099753380 container cleanup f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test)\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=32158\nJul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.241852628 -0400 EDT m=+10.121948152 container died b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra)\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Received SIGHUP\nJul 07 20:13:49 
managed-node2 systemd[1]: run-rf8a9b32703c44fe9919a21200707a783.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-rf8a9b32703c44fe9919a21200707a783.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Successfully parsed config\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v4 ip {}\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v6 ip {}\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: No configuration found stopping the sever\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Setting custom database backend: \\\"sqlite\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Using sqlite as database backend\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph driver overlay\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using transient store: false\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: 
time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Initializing event backend journald\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Setting parallel job count to 7\"\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.2949] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')\nJul 07 
20:13:49 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1551.\nJul 07 20:13:49 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1551.\nJul 07 20:13:49 managed-node2 systemd[1]: run-netns-netns\\x2d3dcd885d\\x2d1b51\\x2d2e38\\x2d72ff\\x2d33596f02c329.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2d3dcd885d\\x2d1b51\\x2d2e38\\x2d72ff\\x2d33596f02c329.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.470731214 -0400 EDT m=+10.350826660 container cleanup b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=32170\nJul 07 20:13:49 managed-node2 systemd[1]: Stopping libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope...\n\u2591\u2591 Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit 
libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1618.\nJul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: Stopped libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope.\n\u2591\u2591 Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1618 and the job result is done.\nJul 07 20:13:49 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1617 and the job result is done.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.543096205 -0400 EDT m=+10.423191413 container remove f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.569252202 -0400 EDT m=+10.449347410 container remove b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:49 managed-node2 systemd[1]: machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: Failed to open /run/systemd/transient/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: No such file or directory\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577427834 -0400 EDT m=+10.457523002 pod remove a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:49 managed-node2 podman[32147]: Pods stopped:\nJul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\nJul 07 20:13:49 managed-node2 podman[32147]: Pods removed:\nJul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\nJul 07 20:13:49 managed-node2 podman[32147]: Secrets removed:\nJul 07 20:13:49 managed-node2 podman[32147]: Volumes 
removed:\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577733569 -0400 EDT m=+10.457828934 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.601543415 -0400 EDT m=+10.481638618 container create 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1619.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.643353432 -0400 EDT m=+10.523448629 container create 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.649232898 -0400 EDT m=+10.529328325 pod create d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675477572 -0400 EDT m=+10.555572747 container create 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, created_at=2021-06-10T18:55:36Z)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675792512 -0400 EDT m=+10.555887719 container restart 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.651020296 -0400 EDT m=+10.531115614 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1623.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.731153306 -0400 EDT m=+10.611248537 container init 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 
(image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.734097189 -0400 EDT m=+10.614192506 container start 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7517] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/5)\nJul 07 20:13:49 managed-node2 systemd-udevd[32184]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:49 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:13:49 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:13:49 managed-node2 systemd-udevd[32189]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7610] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/6)\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7658] device (veth0): carrier: link connected\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7662] device (podman1): carrier: link connected\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7742] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7747] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7759] device (podman1): Activation: starting connection 'podman1' (9a09baee-577d-45df-991f-e577871fe999)\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7761] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7766] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7769] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7773] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7875] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7879] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7899] device (podman1): Activation: successful, device activated.\nJul 07 20:13:49 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run.\n\u2591\u2591 Subject: A start job for unit 
run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1627.\nJul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1631.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.929395798 -0400 EDT m=+10.809491054 container init 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.932349651 -0400 EDT m=+10.812444917 container start 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1636.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.975052109 -0400 EDT m=+10.855147428 container init 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.978227355 -0400 EDT m=+10.858322532 container start 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.984086055 -0400 EDT m=+10.864181258 pod start d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:13:49 managed-node2 systemd[1]: Started A template for running K8s workloads via 
podman-kube-play.\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1478.\nJul 07 20:13:49 managed-node2 podman[32147]: Pod:\nJul 07 20:13:49 managed-node2 podman[32147]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0\nJul 07 20:13:49 managed-node2 podman[32147]: Container:\nJul 07 20:13:49 managed-node2 podman[32147]: 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5\nJul 07 20:13:50 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:13:51 managed-node2 python3.9[32468]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:52 managed-node2 python3.9[32619]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:13:53 managed-node2 python3.9[32769]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:53 managed-node2 python3.9[32918]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:54 managed-node2 podman[33098]: 2025-07-07 20:13:54.593475273 -0400 EDT m=+0.280680463 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:55 managed-node2 python3.9[33262]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:55 managed-node2 python3.9[33411]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None 
modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:55 managed-node2 python3.9[33560]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:13:56 managed-node2 python3.9[33680]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd3.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933635.6006646-13961-107071336953239/.source.yml _original_basename=._73jk67j follow=False checksum=5b3685de46cacb0a0661419a5a5898cbb3cf431c backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:56 managed-node2 python3.9[33829]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.647468979 -0400 EDT m=+0.017382508 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:13:56 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1641.\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.687774841 -0400 EDT m=+0.057688390 container create f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.693778889 -0400 EDT m=+0.063692415 pod create 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.718042148 -0400 EDT m=+0.087955671 container create d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry)\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:13:56 managed-node2 kernel: veth1: entered allmulticast mode\nJul 07 
20:13:56 managed-node2 kernel: veth1: entered promiscuous mode\nJul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7464] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/7)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.695533824 -0400 EDT m=+0.065447531 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state\nJul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7489] device (veth1): carrier: link connected\nJul 07 20:13:56 managed-node2 systemd-udevd[33860]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope.\n\u2591\u2591 Subject: A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1646.\nJul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1651.\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.874054258 -0400 EDT m=+0.243967914 container init f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.877423946 -0400 EDT m=+0.247337594 container start f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope.\n\u2591\u2591 Subject: A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1656.\nJul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit 
libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1661.\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.931183676 -0400 EDT m=+0.301097308 container init d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.93420031 -0400 EDT m=+0.304113971 container start d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.940215927 -0400 EDT m=+0.310129474 pod start 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:13:57 managed-node2 python3.9[34065]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:13:57 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:57 managed-node2 systemd-rc-local-generator[34083]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:58 managed-node2 python3.9[34248]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None\nJul 07 20:13:58 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:58 managed-node2 systemd-rc-local-generator[34268]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:59 managed-node2 python3.9[34433]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:13:59 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1666.\nJul 07 20:13:59 managed-node2 podman[34437]: 2025-07-07 20:13:59.128818064 -0400 EDT m=+0.031043200 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:13:59 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: time=\"2025-07-07T20:14:09-04:00\" 
level=warning msg=\"StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.161891677 -0400 EDT m=+10.064117231 container died d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z)\nJul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.208209429 -0400 EDT m=+10.110434520 container cleanup d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.228424072 -0400 EDT m=+10.130649401 container died f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra)\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left allmulticast mode\nJul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left promiscuous mode\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:14:09 managed-node2 systemd[1]: run-netns-netns\\x2db10132db\\x2d5af1\\x2d0f8c\\x2d38ab\\x2d1e8eaa97e6f2.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 
Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2db10132db\\x2d5af1\\x2d0f8c\\x2d38ab\\x2d1e8eaa97e6f2.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.330607359 -0400 EDT m=+10.232832448 container cleanup f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: Stopped libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope.\n\u2591\u2591 Subject: A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1740 and the job result is done.\nJul 07 20:14:09 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1739 and the job result is done.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.33777977 -0400 EDT m=+10.240004889 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 systemd[1]: 
machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.341042664 -0400 EDT m=+10.243267751 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.366974147 -0400 EDT m=+10.269199273 container remove d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.392885411 -0400 EDT m=+10.295110535 container remove f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.400728494 -0400 EDT m=+10.302953580 pod remove 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 podman[34437]: Pods stopped:\nJul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7\nJul 07 20:14:09 managed-node2 podman[34437]: Pods removed:\nJul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7\nJul 07 20:14:09 managed-node2 podman[34437]: Secrets removed:\nJul 07 20:14:09 managed-node2 podman[34437]: Volumes removed:\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.40084298 -0400 EDT m=+10.303068230 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.421632285 -0400 EDT m=+10.323857401 container create eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished 
successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1741.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.456874206 -0400 EDT m=+10.359099322 container create e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.463729321 -0400 EDT m=+10.365954523 pod create 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.466690532 -0400 EDT m=+10.368915893 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495205758 -0400 EDT m=+10.397430876 container create 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, created_at=2021-06-10T18:55:36Z)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495540708 -0400 EDT m=+10.397765831 container restart eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1745.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.541938993 -0400 EDT m=+10.444164111 container init eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.54556115 -0400 EDT m=+10.447786446 container start eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:14:09 managed-node2 kernel: veth1: entered allmulticast mode\nJul 07 20:14:09 managed-node2 kernel: veth1: entered promiscuous mode\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state\nJul 07 20:14:09 managed-node2 NetworkManager[644]: [1751933649.5632] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/8)\nJul 07 20:14:09 managed-node2 
NetworkManager[644]: [1751933649.5684] device (veth1): carrier: link connected\nJul 07 20:14:09 managed-node2 systemd-udevd[34477]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1749.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.678444807 -0400 EDT m=+10.580670010 container init e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.681382862 -0400 EDT m=+10.583608134 container start e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1754.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.728795591 -0400 EDT m=+10.631020730 container init 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, app=test)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.73177019 -0400 EDT m=+10.633995385 container start 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.737566302 -0400 EDT m=+10.639791423 pod start 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 podman[34437]: Pod:\nJul 07 20:14:09 managed-node2 podman[34437]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36\nJul 07 20:14:09 managed-node2 podman[34437]: Container:\nJul 07 20:14:09 
managed-node2 podman[34437]: 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109\nJul 07 20:14:09 managed-node2 systemd[1]: Started A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1666.\nJul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:10 managed-node2 sudo[34704]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cflwckudrftyvniytbtokrawwncefyyk ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933650.260143-14379-210639111977054/AnsiballZ_command.py'\nJul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:10 managed-node2 python3.9[34706]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:10 managed-node2 systemd[27808]: Started podman-34715.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 110.\nJul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:10 managed-node2 python3.9[34872]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:11 managed-node2 python3.9[35029]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:11 managed-node2 sudo[35186]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utoemynxahtksrgkxmppktxcnibfjzhy ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933651.5642703-14440-50388675366557/AnsiballZ_command.py'\nJul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:11 managed-node2 python3.9[35188]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None 
removes=None stdin=None\nJul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:12 managed-node2 python3.9[35340]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:12 managed-node2 python3.9[35492]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:13 managed-node2 python3.9[35644]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:13 managed-node2 python3.9[35795]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:14 managed-node2 python3.9[35945]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd1-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:14 managed-node2 python3.9[36095]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd2-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:14 managed-node2 python3.9[36245]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd3-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:17 managed-node2 python3.9[36544]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True 
strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:18 managed-node2 python3.9[36699]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:21 managed-node2 python3.9[36850]: ansible-ansible.legacy.dnf Invoked with name=['firewalld'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:23 managed-node2 python3.9[37000]: ansible-systemd Invoked with name=firewalld masked=False daemon_reload=False daemon_reexec=False scope=system no_block=False state=None enabled=None force=None\nJul 07 20:14:23 managed-node2 python3.9[37151]: ansible-ansible.legacy.systemd Invoked with name=firewalld enabled=True state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None\nJul 07 20:14:24 managed-node2 python3.9[37302]: ansible-fedora.linux_system_roles.firewall_lib Invoked with port=['15001-15003/tcp'] permanent=True runtime=True state=enabled __report_changed=True online=True service=[] source_port=[] forward_port=[] rich_rule=[] source=[] interface=[] interface_pci_id=[] icmp_block=[] timeout=0 ipset_entries=[] protocol=[] helper_module=[] destination=[] includes=[] firewalld_conf=None masquerade=None icmp_block_inversion=None target=None zone=None set_default_zone=None ipset=None ipset_type=None description=None short=None\nJul 07 20:14:26 managed-node2 python3.9[37451]: ansible-ansible.legacy.dnf Invoked with name=['python3-libselinux', 'python3-policycoreutils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:27 managed-node2 python3.9[37601]: ansible-ansible.legacy.dnf Invoked with name=['grubby'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:29 managed-node2 python3.9[37751]: ansible-ansible.legacy.dnf Invoked with name=['policycoreutils-python-utils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] 
installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:30 managed-node2 python3.9[37901]: ansible-setup Invoked with filter=['ansible_selinux'] gather_subset=['all'] gather_timeout=10 fact_path=/etc/ansible/facts.d\nJul 07 20:14:32 managed-node2 python3.9[38089]: ansible-fedora.linux_system_roles.local_seport Invoked with ports=['15001-15003'] proto=tcp setype=http_port_t state=present local=False ignore_selinux_state=False reload=True\nJul 07 20:14:33 managed-node2 python3.9[38238]: ansible-fedora.linux_system_roles.selinux_modules_facts Invoked\nJul 07 20:14:37 managed-node2 python3.9[38387]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None\nJul 07 20:14:38 managed-node2 python3.9[38537]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:38 managed-node2 python3.9[38688]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:39 managed-node2 python3.9[38838]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:40 managed-node2 python3.9[38988]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:40 managed-node2 python3.9[39138]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl enable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None\nJul 07 20:14:41 managed-node2 python3.9[39287]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1 state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:41 managed-node2 python3.9[39436]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1-create state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:42 managed-node2 
sudo[39585]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dteqtauzgahpdwlqmxqoqvigwdlcbwgx ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933682.0866897-15498-43758369682888/AnsiballZ_podman_image.py'\nJul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39588.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 114.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39596.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 118.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39604.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 122.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39611.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 126.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39618.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 130.\nJul 07 20:14:43 managed-node2 systemd[27808]: Started podman-39626.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 134.\nJul 07 20:14:43 managed-node2 sudo[39585]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:43 managed-node2 python3.9[39782]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:43 managed-node2 python3.9[39933]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d state=directory owner=podman_basic_user group=3001 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:44 
managed-node2 python3.9[40082]: ansible-ansible.legacy.stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:14:44 managed-node2 python3.9[40157]: ansible-ansible.legacy.file Invoked with owner=podman_basic_user group=3001 mode=0644 dest=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _original_basename=.3ieew216 recurse=False state=file path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:44 managed-node2 sudo[40306]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ustvjrkhcohlxhhxarkriiiprsdnnhal ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933684.6664407-15601-193248781752908/AnsiballZ_podman_play.py'\nJul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:44 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:14:45 managed-node2 systemd[27808]: Started podman-40315.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 138.\nJul 07 20:14:45 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 142.\nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman play kube --start=true --log-level=debug /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml\nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: \nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"/bin/podman filtering at log level debug\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Called kube.PersistentPreRunE(/bin/podman play kube --start=true --log-level=debug 
/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml)\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\n time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"Using sqlite as database backend\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"systemd-logind: Unknown object '/'.\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using graph driver overlay\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using graph root /home/podman_basic_user/.local/share/containers/storage\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using run root /run/user/3001/containers\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using tmp dir /run/user/3001/libpod/tmp\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using transient store: false\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that metacopy is not being used\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that native-diff is usable\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Initializing event backend file\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using OCI runtime 
\\\"/usr/bin/crun\\\"\"\n time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"Setting parallel job count to 7\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Successfully loaded network podman-default-kube-network: &{podman-default-kube-network f726a0dfc720eef9b785c3acdef2ddc0ef169e999e9185270f7b5fdceae44256 bridge podman1 2025-07-07 20:13:16.261934543 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Successfully loaded 2 networks\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Pod using bridge network mode\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Created cgroup path user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice for parent user.slice and name libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Created cgroup user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Got pod cgroup as user.slice/user-3001.slice/user@3001.service/user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice\"\n Error: adding pod to state: name \"httpd1\" is in use: pod already exists\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Shutting down engines\"\n time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=40315\nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125\nJul 07 20:14:45 managed-node2 sudo[40306]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:46 managed-node2 python3.9[40471]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:14:46 managed-node2 python3.9[40621]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:47 managed-node2 python3.9[40772]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:49 managed-node2 python3.9[40922]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:49 managed-node2 python3.9[41071]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:50 managed-node2 podman[41251]: 2025-07-07 20:14:50.30172649 -0400 EDT m=+0.335741630 image pull 
9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:14:50 managed-node2 python3.9[41415]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:51 managed-node2 python3.9[41566]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:51 managed-node2 python3.9[41715]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:14:51 managed-node2 python3.9[41790]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd2.yml _original_basename=.7tnd0tsm recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd2.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:14:52 managed-node2 podman[41946]: 2025-07-07 20:14:52.281267633 -0400 EDT m=+0.019255481 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:14:52 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1759.\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: \nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"/usr/bin/podman filtering at log level debug\"\n time=\"2025-07-07T20:14:52-04:00\" 
level=debug msg=\"Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\n time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"Using sqlite as database backend\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using graph driver overlay\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using transient store: false\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\n time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Initializing event backend journald\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: 
invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\n time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"Setting parallel job count to 7\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Successfully loaded 2 networks\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Pod using bridge network mode\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice for parent machine.slice and name libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice\"\n Error: adding pod to state: name \"httpd2\" is in use: pod already exists\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125\nJul 07 20:14:53 managed-node2 python3.9[42102]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:55 managed-node2 python3.9[42253]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:56 managed-node2 python3.9[42403]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:56 managed-node2 python3.9[42552]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:57 managed-node2 podman[42732]: 2025-07-07 20:14:57.595089727 -0400 EDT m=+0.334374931 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:14:57 managed-node2 python3.9[42895]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:58 
managed-node2 python3.9[43046]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:59 managed-node2 python3.9[43195]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:14:59 managed-node2 python3.9[43270]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd3.yml _original_basename=.fnfhf1h4 recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd3.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:59 managed-node2 python3.9[43419]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:14:59 managed-node2 podman[43426]: 2025-07-07 20:14:59.794215832 -0400 EDT m=+0.017981924 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:14:59 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1763.\nJul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:00 managed-node2 sudo[43582]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-copmprovbtozwjdqvrxslhkmftgtigcs ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933700.520565-16475-105270240946276/AnsiballZ_command.py'\nJul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:00 managed-node2 python3.9[43584]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:00 managed-node2 systemd[27808]: Started 
podman-43592.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 146.\nJul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:01 managed-node2 python3.9[43750]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:01 managed-node2 python3.9[43908]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:02 managed-node2 sudo[44065]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-omhmgepybavqbezpokrriumisrazocox ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933701.8765578-16521-15111276780862/AnsiballZ_command.py'\nJul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:02 managed-node2 python3.9[44067]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:02 managed-node2 python3.9[44219]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:02 managed-node2 python3.9[44371]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:03 managed-node2 python3.9[44523]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None 
unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:03 managed-node2 python3.9[44673]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:04 managed-node2 python3.9[44823]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15003/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:07 managed-node2 python3.9[45122]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:08 managed-node2 python3.9[45277]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:11 managed-node2 python3.9[45428]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None\nJul 07 20:15:12 managed-node2 python3.9[45578]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:12 managed-node2 python3.9[45729]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:12 managed-node2 python3.9[45879]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:14 managed-node2 python3.9[46029]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:14 managed-node2 python3.9[46179]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True 
checksum_algorithm=sha1\nJul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:15 managed-node2 sudo[46330]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yrlqohfxmrbjsiwrdleklbogdfrytzax ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933715.029206-17136-162272482862195/AnsiballZ_systemd.py'\nJul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:15 managed-node2 python3.9[46332]: ansible-systemd Invoked with name=podman-kube@-home-podman_basic_user-.config-containers-ansible\\x2dkubernetes.d-httpd1.yml.service scope=user state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:15:15 managed-node2 systemd[27808]: Reloading.\nJul 07 20:15:15 managed-node2 systemd[27808]: Stopping A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 150.\nJul 07 20:15:25 managed-node2 podman[46347]: time=\"2025-07-07T20:15:25-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd1-httpd1 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:25 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 151 and the job result is done.\nJul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory\nJul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory\nJul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory\nJul 07 20:15:26 managed-node2 podman[46347]: Pods stopped:\nJul 07 20:15:26 managed-node2 podman[46347]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4\nJul 07 20:15:26 managed-node2 podman[46347]: Pods removed:\nJul 07 20:15:26 managed-node2 podman[46347]: 
a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4\nJul 07 20:15:26 managed-node2 podman[46347]: Secrets removed:\nJul 07 20:15:26 managed-node2 podman[46347]: Volumes removed:\nJul 07 20:15:26 managed-node2 systemd[27808]: Stopped A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 150 and the job result is done.\nJul 07 20:15:26 managed-node2 sudo[46330]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:26 managed-node2 python3.9[46572]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:26 managed-node2 sudo[46723]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngscqobpoontmmgbeazhbfnhtlrerjma ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933726.6496177-17450-135897393375501/AnsiballZ_podman_play.py'\nJul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play version: 5.5.1, kube file /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml\nJul 07 20:15:27 managed-node2 systemd[27808]: Started podman-46732.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 152.\nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman kube play --down /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml\nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped:\n Pods removed:\n Secrets removed:\n Volumes removed:\nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: \nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0\nJul 07 20:15:27 managed-node2 sudo[46723]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:27 managed-node2 python3.9[46888]: ansible-file Invoked with 
path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:28 managed-node2 python3.9[47037]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:15:29 managed-node2 python3.9[47187]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:31 managed-node2 python3.9[47338]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:31 managed-node2 python3.9[47488]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:15:31 managed-node2 systemd[1]: Reloading.\nJul 07 20:15:31 managed-node2 systemd-rc-local-generator[47509]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:15:32 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1768.\nJul 07 20:15:32 managed-node2 podman[47527]: 2025-07-07 20:15:32.086748492 -0400 EDT m=+0.031435423 pod stop d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:15:42 managed-node2 podman[47527]: time=\"2025-07-07T20:15:42-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:15:42 managed-node2 systemd[1]: libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.121044923 -0400 EDT m=+10.065732151 container died 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test)\nJul 07 20:15:42 managed-node2 systemd[1]: 
var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.165231828 -0400 EDT m=+10.109918731 container cleanup 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:15:42 managed-node2 systemd[1]: libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.176672676 -0400 EDT m=+10.121359827 container died 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:42 managed-node2 systemd[1]: run-netns-netns\\x2d2e00ab52\\x2d0e7b\\x2d94ee\\x2da345\\x2dec17caccc43b.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2d2e00ab52\\x2d0e7b\\x2d94ee\\x2da345\\x2dec17caccc43b.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit 
var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.276854989 -0400 EDT m=+10.221541921 container cleanup 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1770 and the job result is done.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.302687887 -0400 EDT m=+10.247374820 container remove 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.329086658 -0400 EDT m=+10.273773592 container remove 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 systemd[1]: machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: Failed to open /run/systemd/transient/machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: No such file or directory\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.337213217 -0400 EDT m=+10.281900117 pod remove d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:15:42 managed-node2 systemd[1]: libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 conmon[32226]: conmon 78627b3638a40af7f868 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope/container/memory.events\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.343750156 -0400 EDT m=+10.288437240 container kill 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, 
PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.349945997 -0400 EDT m=+10.294633088 container died 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.409484257 -0400 EDT m=+10.354171379 container remove 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 podman[47527]: Pods stopped:\nJul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0\nJul 07 20:15:42 managed-node2 podman[47527]: Pods removed:\nJul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0\nJul 07 20:15:42 managed-node2 podman[47527]: Secrets removed:\nJul 07 20:15:42 managed-node2 podman[47527]: Volumes removed:\nJul 07 20:15:42 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1768 and the job result is done.\nJul 07 20:15:42 managed-node2 python3.9[47729]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:43 managed-node2 python3.9[47880]: 
ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman kube play --down /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped:\n Pods removed:\n Secrets removed:\n Volumes removed:\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: \nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0\nJul 07 20:15:43 managed-node2 python3.9[48043]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:45 managed-node2 python3.9[48192]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:46 managed-node2 python3.9[48343]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:47 managed-node2 python3.9[48493]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:15:47 managed-node2 systemd[1]: Reloading.\nJul 07 20:15:47 managed-node2 systemd-rc-local-generator[48513]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:15:47 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1771.\nJul 07 20:15:47 managed-node2 podman[48533]: 2025-07-07 20:15:47.405787867 -0400 EDT m=+0.031643471 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 podman[48533]: 
time=\"2025-07-07T20:15:57-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:15:57 managed-node2 systemd[1]: libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.434010239 -0400 EDT m=+10.059866007 container died 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.478667416 -0400 EDT m=+10.104522986 container cleanup 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 systemd[1]: libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.500190016 -0400 EDT m=+10.126045733 container died e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: run-rce7152e4cf79441b86b3f3ed7d6f4283.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit 
run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left allmulticast mode\nJul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left promiscuous mode\nJul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:15:57 managed-node2 NetworkManager[644]: [1751933757.5423] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')\nJul 07 20:15:57 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1773.\nJul 07 20:15:57 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1773.\nJul 07 20:15:57 managed-node2 systemd[1]: run-netns-netns\\x2db2b0269b\\x2d6f52\\x2d704b\\x2de0f2\\x2d936fd9832ebd.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2db2b0269b\\x2d6f52\\x2d704b\\x2de0f2\\x2d936fd9832ebd.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.72272296 -0400 EDT m=+10.348578562 container cleanup e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1839 and the job result is done.\nJul 07 20:15:57 managed-node2 podman[48533]: 
2025-07-07 20:15:57.730067956 -0400 EDT m=+10.355923539 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.736214931 -0400 EDT m=+10.362070507 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.760284407 -0400 EDT m=+10.386140034 container remove 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.786340751 -0400 EDT m=+10.412196374 container remove e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.794425826 -0400 EDT m=+10.420281396 pod remove 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 systemd[1]: libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.797955293 -0400 EDT m=+10.423811069 container kill eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.805138661 -0400 EDT m=+10.430994476 container died eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.86757777 -0400 EDT m=+10.493433378 container remove 
eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: Pods stopped:\nJul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36\nJul 07 20:15:57 managed-node2 podman[48533]: Pods removed:\nJul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36\nJul 07 20:15:57 managed-node2 podman[48533]: Secrets removed:\nJul 07 20:15:57 managed-node2 podman[48533]: Volumes removed:\nJul 07 20:15:57 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1771 and the job result is done.\nJul 07 20:15:58 managed-node2 python3.9[48770]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit 
succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play Invoked with state=absent kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd3.yml\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:15:59 managed-node2 python3.9[49083]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:00 managed-node2 python3.9[49232]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None\nJul 07 20:16:00 managed-node2 python3.9[49382]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:01 managed-node2 sudo[49533]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eyaoewgeadnpajdfsqkdmnspilftsmzm ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.058141-18477-143961090014844/AnsiballZ_podman_container_info.py'\nJul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:01 managed-node2 python3.9[49535]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None\nJul 07 20:16:01 managed-node2 systemd[27808]: Started podman-49536.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 156.\nJul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:01 managed-node2 
sudo[49691]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-csyhukllkdoqxtgiejqztpcafyureeyp ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.7158227-18498-173793406938955/AnsiballZ_command.py'\nJul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:01 managed-node2 python3.9[49693]: ansible-ansible.legacy.command Invoked with _raw_params=podman network ls -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49694.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 160.\nJul 07 20:16:02 managed-node2 sudo[49691]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:02 managed-node2 sudo[49850]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nugdaehmscgbqoulldhoxbffneulpeqi ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933762.1564903-18516-230631949404561/AnsiballZ_command.py'\nJul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:02 managed-node2 python3.9[49852]: ansible-ansible.legacy.command Invoked with _raw_params=podman secret ls -n -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49853.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 164.\nJul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:02 managed-node2 python3.9[50009]: ansible-ansible.legacy.command Invoked with removes=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl disable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None stdin=None\nJul 07 20:16:02 managed-node2 systemd[1]: Stopping User Manager for UID 3001...\n\u2591\u2591 Subject: A stop job for unit user@3001.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user@3001.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1840.\nJul 07 20:16:02 managed-node2 systemd[27808]: Activating special unit Exit the Session...\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopping podman-pause-7fbe17c5.scope...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: 
systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 181.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /app/podman-kube.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 183 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 180 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Main User Target.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 174 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Basic System.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 187 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Paths.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 185 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Sockets.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 173 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Timers.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 188 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped Mark boot as successful after the user session has run 2 minutes.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 186 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped Daily Cleanup of User's Temporary Directories.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: 
systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 172 and the job result is done.\nJul 07 20:16:02 managed-node2 dbus-broker[28296]: Dispatched 2118 messages @ 3(\u00b115)\u03bcs / message.\n\u2591\u2591 Subject: Dispatched 2118 messages\n\u2591\u2591 Defined-By: dbus-broker\n\u2591\u2591 Support: https://groups.google.com/forum/#!forum/bus1-devel\n\u2591\u2591 \n\u2591\u2591 This message is printed by dbus-broker when shutting down. It includes metric\n\u2591\u2591 information collected during the runtime of dbus-broker.\n\u2591\u2591 \n\u2591\u2591 The message lists the number of dispatched messages\n\u2591\u2591 (in this case 2118) as well as the mean time to\n\u2591\u2591 handling a single message. The time measurements exclude the time spent on\n\u2591\u2591 writing to and reading from the kernel.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopping D-Bus User Message Bus...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 171.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped Create User's Volatile Files and Directories.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 176 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped D-Bus User Message Bus.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 171 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped podman-pause-7fbe17c5.scope.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 181 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /user.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 179 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Closed D-Bus User Message Bus Socket.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 175 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice User Application Slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has 
finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 184 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Reached target Shutdown.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 170.\nJul 07 20:16:02 managed-node2 systemd[27808]: Finished Exit the Session.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 169.\nJul 07 20:16:02 managed-node2 systemd[27808]: Reached target Exit the Session.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 168.\nJul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user@3001.service has successfully entered the 'dead' state.\nJul 07 20:16:02 managed-node2 systemd[1]: Stopped User Manager for UID 3001.\n\u2591\u2591 Subject: A stop job for unit user@3001.service has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user@3001.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1840 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Consumed 2.173s CPU time.\n\u2591\u2591 Subject: Resources consumed by unit runtime\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user@3001.service completed and consumed the indicated resources.\nJul 07 20:16:02 managed-node2 systemd[1]: Stopping User Runtime Directory /run/user/3001...\n\u2591\u2591 Subject: A stop job for unit user-runtime-dir@3001.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user-runtime-dir@3001.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1841.\nJul 07 20:16:03 managed-node2 systemd[1]: run-user-3001.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-user-3001.mount has successfully entered the 'dead' state.\nJul 07 20:16:03 managed-node2 systemd[1]: user-runtime-dir@3001.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user-runtime-dir@3001.service has successfully entered the 'dead' state.\nJul 07 20:16:03 managed-node2 systemd[1]: Stopped User Runtime Directory /run/user/3001.\n\u2591\u2591 Subject: A stop job for unit user-runtime-dir@3001.service 
has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user-runtime-dir@3001.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1841 and the job result is done.\nJul 07 20:16:03 managed-node2 systemd[1]: Removed slice User Slice of UID 3001.\n\u2591\u2591 Subject: A stop job for unit user-3001.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user-3001.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1843 and the job result is done.\nJul 07 20:16:03 managed-node2 systemd[1]: user-3001.slice: Consumed 2.196s CPU time.\n\u2591\u2591 Subject: Resources consumed by unit runtime\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user-3001.slice completed and consumed the indicated resources.\nJul 07 20:16:03 managed-node2 python3.9[50161]: ansible-ansible.legacy.command Invoked with _raw_params=loginctl show-user --value -p State podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:04 managed-node2 sudo[50311]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dpicetofkbhrscpezuhafexsvxxmiwru ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933764.0345392-18589-160568711821160/AnsiballZ_command.py'\nJul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:04 managed-node2 python3.9[50313]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd1 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:04 managed-node2 python3.9[50468]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:04 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:05 managed-node2 python3.9[50624]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd3 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:05 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has 
successfully entered the 'dead' state.\nJul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:05 managed-node2 sudo[50781]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ulqlnfrznxyujwhsbktkenenjnaaarpn ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933765.2955933-18640-73245982266512/AnsiballZ_command.py'\nJul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:05 managed-node2 python3.9[50783]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:05 managed-node2 python3.9[50935]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:06 managed-node2 python3.9[51087]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:06 managed-node2 python3.9[51239]: ansible-stat Invoked with path=/var/lib/systemd/linger/podman_basic_user follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:07 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:16:09 managed-node2 python3.9[51537]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:10 managed-node2 python3.9[51692]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:16:10 managed-node2 python3.9[51842]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:14 managed-node2 python3.9[51993]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None\nJul 07 20:16:14 managed-node2 python3.9[52143]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:15 managed-node2 python3.9[52294]: ansible-ansible.legacy.command Invoked with 
_raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:15 managed-node2 python3.9[52444]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:17 managed-node2 python3.9[52594]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:17 managed-node2 python3.9[52744]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:17 managed-node2 python3.9[52893]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:18 managed-node2 python3.9[53042]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:19 managed-node2 python3.9[53191]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:16:20 managed-node2 python3.9[53341]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:21 managed-node2 python3.9[53492]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:21 managed-node2 python3.9[53642]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:16:22 managed-node2 python3.9[53793]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:22 managed-node2 python3.9[53942]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:23 managed-node2 python3.9[54091]: ansible-stat Invoked with path=/usr/bin/getsubids 
follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:25 managed-node2 python3.9[54242]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:25 managed-node2 python3.9[54392]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:16:26 managed-node2 python3.9[54543]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:26 managed-node2 python3.9[54692]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:27 managed-node2 python3.9[54841]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None\nJul 07 20:16:28 managed-node2 python3.9[54991]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:29 managed-node2 python3.9[55140]: ansible-file Invoked with path=/etc/containers/storage.conf state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:30 managed-node2 python3.9[55289]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:33 managed-node2 python3.9[55487]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d\nJul 07 20:16:34 managed-node2 python3.9[55662]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:34 managed-node2 python3.9[55811]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:36 managed-node2 python3.9[56109]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:37 managed-node2 python3.9[56264]: ansible-getent Invoked with database=passwd key=root 
fail_key=False service=None split=None\nJul 07 20:16:37 managed-node2 python3.9[56414]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:40 managed-node2 python3.9[56565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:41 managed-node2 python3.9[56716]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:41 managed-node2 python3.9[56865]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:16:42 managed-node2 python3.9[56985]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933801.5835004-19965-109711770661066/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:42 managed-node2 python3.9[57134]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:16:42 managed-node2 systemd[1]: Reloading.\nJul 07 20:16:43 managed-node2 systemd-rc-local-generator[57151]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:16:43 managed-node2 python3.9[57317]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:16:43 managed-node2 systemd[1]: Starting quadlet-pod-pod-pod.service...\n\u2591\u2591 Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-pod-pod.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1845.\nJul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay-metacopy\\x2dcheck327374229-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-metacopy\\x2dcheck327374229-merged.mount has successfully entered the 'dead' state.\nJul 07 20:16:43 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job 
for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1916.\nJul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.753251921 -0400 EDT m=+0.075359120 container create 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)\nJul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.760005549 -0400 EDT m=+0.082112720 pod create e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod)\nJul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57321]: e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79\nJul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8066] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/9)\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered disabled state\nJul 07 20:16:43 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:16:43 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered forwarding state\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8206] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/10)\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8221] device (veth0): carrier: link connected\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8226] device (podman0): carrier: link connected\nJul 07 20:16:43 managed-node2 systemd-udevd[57347]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:16:43 managed-node2 systemd-udevd[57348]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8651] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8658] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8668] device (podman0): Activation: starting connection 'podman0' (0dc63386-fc14-4ac2-8cee-25b24d1739b5)\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8670] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8673] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8676] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8679] 
device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1921.\nJul 07 20:16:43 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1921.\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8930] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8932] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8938] device (podman0): Activation: successful, device activated.\nJul 07 20:16:43 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1987.\nJul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.975146229 -0400 EDT m=+0.200627141 container init 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)\nJul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.981266459 -0400 EDT m=+0.206747252 container start 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)\nJul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.987430031 -0400 EDT m=+0.212910758 pod start e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod)\nJul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57329]: quadlet-pod\nJul 07 20:16:43 managed-node2 systemd[1]: Started quadlet-pod-pod-pod.service.\n\u2591\u2591 Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-pod-pod.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1845.\nJul 07 20:16:44 managed-node2 python3.9[57565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True 
checksum_algorithm=sha1\nJul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:46 managed-node2 podman[57748]: 2025-07-07 20:16:46.651148299 -0400 EDT m=+0.387653928 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:16:47 managed-node2 python3.9[57912]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:47 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:47 managed-node2 python3.9[58061]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:16:47 managed-node2 python3.9[58181]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933807.2449324-20069-121688553430320/.source.container dest=/etc/containers/systemd/quadlet-pod-container.container owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:48 managed-node2 python3.9[58330]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:16:48 managed-node2 systemd[1]: Reloading.\nJul 07 20:16:48 managed-node2 systemd-rc-local-generator[58347]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:16:48 managed-node2 python3.9[58513]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:16:49 managed-node2 systemd[1]: Starting quadlet-pod-container.service...\n\u2591\u2591 Subject: A start job for unit quadlet-pod-container.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-container.service has begun 
execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1992.\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.074860568 -0400 EDT m=+0.046591683 container create b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z)\nJul 07 20:16:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-volatile\\x2dcheck976746358-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-volatile\\x2dcheck976746358-merged.mount has successfully entered the 'dead' state.\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.117361038 -0400 EDT m=+0.089092248 container init b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service)\nJul 07 20:16:49 managed-node2 systemd[1]: Started quadlet-pod-container.service.\n\u2591\u2591 Subject: A start job for unit quadlet-pod-container.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-container.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1992.\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.122201448 -0400 EDT m=+0.093932749 container start b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:16:49 managed-node2 quadlet-pod-container[58517]: b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.05249592 -0400 EDT m=+0.024227291 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:16:49 managed-node2 python3.9[58679]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:49 managed-node2 python3.9[58829]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:50 managed-node2 python3.9[58979]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}}\n {{end}}' _uses_shell=False 
expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:50 managed-node2 python3.9[59137]: ansible-user Invoked with name=user_quadlet_pod uid=2223 state=present non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on managed-node2 update_password=always group=None groups=None comment=None home=None shell=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None\nJul 07 20:16:50 managed-node2 useradd[59139]: new group: name=user_quadlet_pod, GID=2223\nJul 07 20:16:50 managed-node2 useradd[59139]: new user: name=user_quadlet_pod, UID=2223, GID=2223, home=/home/user_quadlet_pod, shell=/bin/bash, from=/dev/pts/0\nJul 07 20:16:50 managed-node2 rsyslogd[812]: imjournal: journal files changed, reloading... [v8.2412.0-2.el9 try https://www.rsyslog.com/e/0 ]\nJul 07 20:16:52 managed-node2 python3.9[59444]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:53 managed-node2 python3.9[59599]: ansible-getent Invoked with database=passwd key=user_quadlet_pod fail_key=False service=None split=None\nJul 07 20:16:53 managed-node2 python3.9[59749]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:53 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:16:54 managed-node2 python3.9[59900]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:54 managed-node2 python3.9[60050]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:56 managed-node2 python3.9[60200]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:57 managed-node2 python3.9[60351]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:57 managed-node2 python3.9[60501]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None 
chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:58 managed-node2 python3.9[60651]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None\nJul 07 20:16:58 managed-node2 systemd[1]: Created slice User Slice of UID 2223.\n\u2591\u2591 Subject: A start job for unit user-2223.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user-2223.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2130.\nJul 07 20:16:58 managed-node2 systemd[1]: Starting User Runtime Directory /run/user/2223...\n\u2591\u2591 Subject: A start job for unit user-runtime-dir@2223.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user-runtime-dir@2223.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2065.\nJul 07 20:16:58 managed-node2 systemd[1]: Finished User Runtime Directory /run/user/2223.\n\u2591\u2591 Subject: A start job for unit user-runtime-dir@2223.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user-runtime-dir@2223.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2065.\nJul 07 20:16:58 managed-node2 systemd[1]: Starting User Manager for UID 2223...\n\u2591\u2591 Subject: A start job for unit user@2223.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user@2223.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2064.\nJul 07 20:16:58 managed-node2 systemd[60658]: pam_unix(systemd-user:session): session opened for user user_quadlet_pod(uid=2223) by user_quadlet_pod(uid=0)\nJul 07 20:16:58 managed-node2 systemd[60658]: Queued start job for default target Main User Target.\nJul 07 20:16:58 managed-node2 systemd[60658]: Created slice User Application Slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 4.\nJul 07 20:16:58 managed-node2 systemd[60658]: Started Mark boot as successful after the user session has run 2 minutes.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 8.\nJul 07 20:16:58 managed-node2 systemd[60658]: Started Daily Cleanup of User's Temporary Directories.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job 
identifier is 9.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Paths.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 12.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Timers.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 7.\nJul 07 20:16:58 managed-node2 systemd[60658]: Starting D-Bus User Message Bus Socket...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 11.\nJul 07 20:16:58 managed-node2 systemd[60658]: Starting Create User's Volatile Files and Directories...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 3.\nJul 07 20:16:58 managed-node2 systemd[60658]: Listening on D-Bus User Message Bus Socket.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 11.\nJul 07 20:16:58 managed-node2 systemd[60658]: Finished Create User's Volatile Files and Directories.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 3.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Sockets.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 10.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Basic System.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Main User Target.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1.\nJul 07 20:16:58 managed-node2 systemd[60658]: Startup finished in 65ms.\n\u2591\u2591 
Subject: User manager start-up is now complete\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The user manager instance for user 2223 has been started. All services queued\n\u2591\u2591 for starting have been started. Note that other services might still be starting\n\u2591\u2591 up or be started at any later time.\n\u2591\u2591 \n\u2591\u2591 Startup of the manager took 65603 microseconds.\nJul 07 20:16:58 managed-node2 systemd[1]: Started User Manager for UID 2223.\n\u2591\u2591 Subject: A start job for unit user@2223.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user@2223.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2064.\nJul 07 20:16:59 managed-node2 python3.9[60817]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:59 managed-node2 python3.9[60966]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:17:00 managed-node2 python3.9[61086]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933819.4453251-20404-152886448732131/.source.pod dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:00 managed-node2 sudo[61235]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qycimnmrylvnpkxuzbdovgpddpoemvav ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.1430469-20434-39126838474860/AnsiballZ_systemd.py'\nJul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd [WARNING] Module remote_tmp /home/user_quadlet_pod/.ansible/tmp did not exist and was created with a mode of 0700, this may cause issues when running as another user. 
To avoid this, create the remote_tmp dir with the correct permissions manually\nJul 07 20:17:00 managed-node2 systemd[60658]: Reloading.\nJul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:00 managed-node2 sudo[61397]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nnnqmwerpaczbueaimzsjafnuajzqvhs ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.7074661-20450-27497593035399/AnsiballZ_systemd.py'\nJul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:01 managed-node2 python3.9[61399]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:17:01 managed-node2 systemd[60658]: Starting Wait for system level network-online.target as user....\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 25.\nJul 07 20:17:01 managed-node2 sh[61403]: active\nJul 07 20:17:01 managed-node2 systemd[60658]: Finished Wait for system level network-online.target as user..\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 25.\nJul 07 20:17:01 managed-node2 systemd[60658]: Starting quadlet-pod-pod-pod.service...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 13.\nJul 07 20:17:01 managed-node2 systemd[60658]: Starting D-Bus User Message Bus...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 26.\nJul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored\nJul 07 20:17:01 managed-node2 systemd[60658]: Started D-Bus User Message Bus.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 26.\nJul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored\nJul 07 20:17:01 managed-node2 dbus-broker-lau[61428]: Ready\nJul 07 20:17:01 managed-node2 systemd[60658]: Created slice Slice /user.\n\u2591\u2591 Subject: A start job for unit UNIT has 
finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 32.\nJul 07 20:17:01 managed-node2 systemd[60658]: Created slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 31.\nJul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61411]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658\nJul 07 20:17:01 managed-node2 systemd[60658]: podman-pause-d252ab55.scope: unit configures an IP firewall, but not running as root.\nJul 07 20:17:01 managed-node2 systemd[60658]: (This warning is only shown for the first unit using IP firewalling.)\nJul 07 20:17:01 managed-node2 systemd[60658]: Started podman-pause-d252ab55.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 35.\nJul 07 20:17:01 managed-node2 systemd[60658]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 39.\nJul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61431]: quadlet-pod\nJul 07 20:17:01 managed-node2 systemd[60658]: Started quadlet-pod-pod-pod.service.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 13.\nJul 07 20:17:01 managed-node2 sudo[61397]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:02 managed-node2 python3.9[61604]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:02 managed-node2 python3.9[61755]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:02 managed-node2 python3.9[61905]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:04 managed-node2 python3.9[62055]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None\nJul 07 
20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:04 managed-node2 sudo[62204]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-clqautntenvhsnblukvtrjsozolrrutk ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933824.2704933-20565-153115325770627/AnsiballZ_podman_image.py'\nJul 07 20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62207.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 44.\nJul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62215.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 48.\nJul 07 20:17:05 managed-node2 systemd[60658]: Started podman-62241.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 52.\nJul 07 20:17:05 managed-node2 sudo[62204]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:05 managed-node2 python3.9[62397]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:17:06 managed-node2 python3.9[62546]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:17:06 managed-node2 python3.9[62666]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933826.0010831-20615-171147992892008/.source.container dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:06 managed-node2 sudo[62815]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvdbdvervsgjztpuoyolguwsbeqeprfj ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 
/var/tmp/ansible-tmp-1751933826.7366147-20636-253031789963643/AnsiballZ_systemd.py'\nJul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:07 managed-node2 python3.9[62817]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:17:07 managed-node2 systemd[60658]: Reloading.\nJul 07 20:17:07 managed-node2 sudo[62815]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:07 managed-node2 sudo[62977]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uowxubkhbahtejstbxtpvjntqjsyenah ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933827.3300688-20652-5834244785399/AnsiballZ_systemd.py'\nJul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:07 managed-node2 python3.9[62979]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:17:07 managed-node2 systemd[60658]: Starting quadlet-pod-container.service...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 56.\nJul 07 20:17:07 managed-node2 systemd[60658]: Started quadlet-pod-container.service.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 56.\nJul 07 20:17:07 managed-node2 quadlet-pod-container[62982]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6\nJul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:08 managed-node2 python3.9[63145]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:08 managed-node2 python3.9[63295]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:09 managed-node2 sudo[63445]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-smtrjtflbvxbkabzbxqvalutkdqpmdkb ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933828.9926739-20712-141410485382073/AnsiballZ_command.py'\nJul 07 20:17:09 managed-node2 sudo[63445]: 
pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:09 managed-node2 python3.9[63447]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}}\n {{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:09 managed-node2 systemd[60658]: Started podman-63448.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 70.\nJul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:09 managed-node2 python3.9[63604]: ansible-stat Invoked with path=/var/lib/systemd/linger/user_quadlet_pod follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:11 managed-node2 python3.9[63904]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:12 managed-node2 python3.9[64059]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:13 managed-node2 python3.9[64210]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:13 managed-node2 python3.9[64360]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:15 managed-node2 python3.9[64510]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:16 managed-node2 python3.9[64661]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:16 managed-node2 python3.9[64811]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:17 managed-node2 python3.9[64961]: ansible-stat Invoked with path=/run/user/2223 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:18 managed-node2 sudo[65112]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mdrwdexxoegkvbfvoaccgnpcnrcnmlbz ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 
/var/tmp/ansible-tmp-1751933838.0379803-20995-129290334053056/AnsiballZ_systemd.py'\nJul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:18 managed-node2 python3.9[65114]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None\nJul 07 20:17:18 managed-node2 systemd[60658]: Reloading.\nJul 07 20:17:18 managed-node2 systemd[60658]: Stopping quadlet-pod-container.service...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 74.\nJul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: time=\"2025-07-07T20:17:28-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container quadlet-pod-container in 10 seconds, resorting to SIGKILL\"\nJul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6\nJul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Main process exited, code=exited, status=137/n/a\n\u2591\u2591 Subject: Unit process exited\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 An ExecStart= process belonging to unit UNIT has exited.\n\u2591\u2591 \n\u2591\u2591 The process' exit code is 'exited' and its exit status is 137.\nJul 07 20:17:28 managed-node2 systemd[60658]: Removed slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 75 and the job result is done.\nJul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory\nJul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Failed with result 'exit-code'.\n\u2591\u2591 Subject: Unit failed\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit UNIT has entered the 'failed' state with result 'exit-code'.\nJul 07 20:17:28 managed-node2 systemd[60658]: Stopped quadlet-pod-container.service.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 74 and the job result is done.\nJul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory\nJul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65162]: quadlet-pod\nJul 07 20:17:28 managed-node2 
sudo[65112]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory\nJul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65176]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658\nJul 07 20:17:29 managed-node2 python3.9[65334]: ansible-stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:30 managed-node2 python3.9[65634]: ansible-ansible.legacy.command Invoked with _raw_params=set -x\n set -o pipefail\n exec 1>&2\n #podman volume rm --all\n #podman network prune -f\n podman volume ls\n podman network ls\n podman secret ls\n podman container ls\n podman pod ls\n podman images\n systemctl list-units | grep quadlet\n systemctl list-unit-files | grep quadlet\n ls -alrtF /etc/containers/systemd\n /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:30 managed-node2 python3.9[65839]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:31 managed-node2 python3.9[65989]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None", "task_name": "Dump journal", "task_path": "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:151" }, { "ansible_version": "2.17.12", "end_time": "2025-07-08T00:17:39.817544+00:00Z", "host": "managed-node2", "message": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "start_time": "2025-07-08T00:17:39.799104+00:00Z", "task_name": "Parse quadlet file", "task_path": "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/parse_quadlet_file.yml:12" }, { "ansible_version": "2.17.12", "delta": "0:00:00.038388", "end_time": "2025-07-07 20:17:40.162217", "host": "managed-node2", "message": "", "rc": 0, "start_time": "2025-07-07 20:17:40.123829", "stdout": "Jul 07 20:13:28 managed-node2 aardvark-dns[29155]: Received SIGHUP\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: Successfully parsed config\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v4 ip {}\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: Listen v6 ip {}\nJul 07 20:13:28 managed-node2 aardvark-dns[29155]: No configuration found stopping the sever\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Called cleanup.PersistentPreRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath 
/home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Setting custom database backend: \\\"sqlite\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Using sqlite as database backend\"\nJul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:13:28 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:13:28 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"systemd-logind: Unknown object '/'.\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using graph driver overlay\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using graph root /home/podman_basic_user/.local/share/containers/storage\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using run root /run/user/3001/containers\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using tmp dir /run/user/3001/libpod/tmp\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using transient store: false\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Cached value indicated that metacopy is not being used\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Cached value indicated that native-diff is usable\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Initializing event backend file\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: 
time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Setting parallel job count to 7\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Called cleanup.PersistentPostRunE(/usr/bin/podman --root /home/podman_basic_user/.local/share/containers/storage --runroot /run/user/3001/containers --log-level debug --cgroup-manager systemd --tmpdir /run/user/3001/libpod/tmp --network-config-dir --network-backend netavark --volumepath /home/podman_basic_user/.local/share/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --events-backend file --syslog container cleanup --stopped-only e9692bbfc519ef92cef48f387f6e39e18dec1d44e7caa03632016f9015c87147)\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:28 managed-node2 /usr/bin/podman[29684]: time=\"2025-07-07T20:13:28-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=29684\nJul 07 20:13:28 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: 
https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 83 and the job result is done.\nJul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory\nJul 07 20:13:28 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory\nJul 07 20:13:29 managed-node2 systemd[27808]: user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f.slice: No such file or directory\nJul 07 20:13:29 managed-node2 podman[29660]: Pods stopped:\nJul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f\nJul 07 20:13:29 managed-node2 podman[29660]: Pods removed:\nJul 07 20:13:29 managed-node2 podman[29660]: 0e6df440f61a12704e79ae84c247bb70ea2190773161cdbdf4097503a725fc2f\nJul 07 20:13:29 managed-node2 podman[29660]: Secrets removed:\nJul 07 20:13:29 managed-node2 podman[29660]: Volumes removed:\nJul 07 20:13:29 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 84.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 88.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started rootless-netns-281f12f7.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 92.\nJul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:29 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:13:29 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:29 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state\nJul 07 20:13:29 managed-node2 systemd[27808]: Started /usr/libexec/podman/aardvark-dns --config /run/user/3001/containers/networks/aardvark-dns -p 53 run.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: 
https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 96.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 100.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 105.\nJul 07 20:13:29 managed-node2 systemd[27808]: Started A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 70.\nJul 07 20:13:29 managed-node2 podman[29660]: Pod:\nJul 07 20:13:29 managed-node2 podman[29660]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4\nJul 07 20:13:29 managed-node2 podman[29660]: Container:\nJul 07 20:13:29 managed-node2 podman[29660]: 98a702eb9e86b0efc7d3e6878bf2b4db5ac6ff3d0bc5383014d2958ce12dced5\nJul 07 20:13:29 managed-node2 sudo[29655]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:13:30 managed-node2 python3.9[29983]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:13:30 managed-node2 python3.9[30133]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:32 managed-node2 python3.9[30284]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:13:33 managed-node2 python3.9[30434]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:33 managed-node2 python3.9[30583]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: 
systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:34 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:34 managed-node2 podman[30766]: 2025-07-07 20:13:34.583080898 -0400 EDT m=+0.387686363 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:35 managed-node2 python3.9[30929]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:35 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:35 managed-node2 python3.9[31078]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:35 managed-node2 python3.9[31227]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:13:36 managed-node2 python3.9[31347]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd2.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933615.542873-13335-149986577229905/.source.yml _original_basename=.op5axps4 follow=False checksum=ce164467a3a112a82832f62e0fdfcaf3c7eecdd1 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:36 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.614627171 -0400 EDT m=+0.024918635 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:13:36 managed-node2 systemd[1]: Created slice cgroup 
machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1383.\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.657483611 -0400 EDT m=+0.067774829 container create b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.663455669 -0400 EDT m=+0.073746755 pod create a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.690624127 -0400 EDT m=+0.100915321 container create f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, app=test, io.containers.autoupdate=registry, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7101] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/3)\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:36 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.667763108 -0400 EDT m=+0.078054303 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:36 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7230] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4)\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:36 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7287] device (veth0): carrier: link connected\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7289] device (podman1): carrier: link connected\nJul 07 20:13:36 managed-node2 systemd-udevd[31526]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:36 managed-node2 systemd-udevd[31524]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7661] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7667] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7674] device (podman1): Activation: starting connection 'podman1' (eac731d7-3726-4468-a790-cf1c7402dd92)\nJul 07 20:13:36 managed-node2 
NetworkManager[644]: [1751933616.7676] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7680] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7682] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7686] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1388.\nJul 07 20:13:36 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1388.\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7916] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7918] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external')\nJul 07 20:13:36 managed-node2 NetworkManager[644]: [1751933616.7923] device (podman1): Activation: successful, device activated.\nJul 07 20:13:36 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run.\n\u2591\u2591 Subject: A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit run-rf8a9b32703c44fe9919a21200707a783.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1454.\nJul 07 20:13:36 managed-node2 aardvark-dns[31613]: starting aardvark on a child with pid 31614\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Successfully parsed config\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v4 ip {\"podman-default-kube-network\": [10.89.0.1]}\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Listen v6 ip {}\nJul 07 20:13:36 managed-node2 aardvark-dns[31614]: Using the following upstream servers: [10.29.169.13:53, 10.29.170.12:53, 10.2.32.1:53]\nJul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope.\n\u2591\u2591 Subject: A start job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished 
successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1458.\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/12/attach}\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : terminal_ctrl_fd: 12\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : winsz read side: 16, winsz write side: 17\nJul 07 20:13:36 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1463.\nJul 07 20:13:36 managed-node2 conmon[31618]: conmon b54360e34ffcfca4fbf3 : container PID: 31620\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.960118446 -0400 EDT m=+0.370409669 container init b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:36 managed-node2 podman[31503]: 2025-07-07 20:13:36.963833697 -0400 EDT m=+0.374124951 container start b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:36 managed-node2 systemd[1]: Started libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope.\n\u2591\u2591 Subject: A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1468.\nJul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : addr{sun_family=AF_UNIX, sun_path=/proc/self/fd/11/attach}\nJul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : terminal_ctrl_fd: 11\nJul 07 20:13:36 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : winsz read side: 15, winsz write side: 16\nJul 07 20:13:37 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1473.\nJul 07 20:13:37 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container PID: 31625\nJul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.017137848 -0400 EDT m=+0.427429053 container init f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, 
io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.020217845 -0400 EDT m=+0.430509047 container start f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:13:37 managed-node2 podman[31503]: 2025-07-07 20:13:37.026166839 -0400 EDT m=+0.436457992 pod start a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pod:\n a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\n Container:\n f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\n \nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"/usr/bin/podman filtering at log level debug\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Using sqlite as database backend\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using graph driver overlay\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using transient store: false\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Initializing event backend journald\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug 
msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Setting parallel job count to 7\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Successfully loaded 2 networks\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pod using bridge network mode\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"no command or entrypoint provided, and no CMD or ENTRYPOINT from image: defaulting to empty string\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"using systemd mode: false\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"setting container name a89535868ec0-infra\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Loading seccomp profile from \\\"/usr/share/containers/seccomp.json\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Allocated lock 1 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n 
time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Cached value indicated that idmapped mounts for overlay are supported\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created container \\\"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\\\" has work directory \\\"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\\\" has run directory \\\"/run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Looking up image \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Normalized platform linux/amd64 to {amd64 linux [] }\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Trying \\\"quay.io/libpod/testimage:20210610\\\" ...\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"parsed reference into \\\"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pulling image quay.io/libpod/testimage:20210610 (policy: missing)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Looking up image \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Normalized platform linux/amd64 to {amd64 linux [] }\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Trying \\\"quay.io/libpod/testimage:20210610\\\" ...\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"parsed reference into \\\"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob 
\\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Looking up image \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Normalized platform linux/amd64 to {amd64 linux [] }\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Trying \\\"quay.io/libpod/testimage:20210610\\\" ...\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"parsed reference into \\\"[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Found image \\\"quay.io/libpod/testimage:20210610\\\" as \\\"quay.io/libpod/testimage:20210610\\\" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.mountopt=nodev,metacopy=on]@9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f)\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Inspecting image 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"using systemd mode: false\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"adding container to pod httpd2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"setting container name httpd2-httpd2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Loading seccomp profile from \\\"/usr/share/containers/seccomp.json\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Sysctl net.ipv4.ping_group_range=0 0 ignored in containers.conf, since Network Namespace set to host\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /proc\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /dev\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /dev/pts\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /dev/mqueue\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount 
/sys\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding mount /sys/fs/cgroup\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Allocated lock 2 for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"exporting opaque data as blob \\\"sha256:9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\" has work directory \\\"/var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\" has run directory \\\"/run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Strongconnecting node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pushed b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 onto stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Finishing node b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2. Popped b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 off stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Strongconnecting node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Pushed f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 onto stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Finishing node f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06. 
Popped f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 off stack\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Made network namespace at /run/netns/netns-3dcd885d-1b51-2e38-72ff-33596f02c329 for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n [DEBUG netavark::network::validation] Validating network namespace...\n [DEBUG netavark::commands::setup] Setting up...\n [INFO netavark::firewall] Using iptables firewall driver\n [DEBUG netavark::network::bridge] Setup network podman-default-kube-network\n [DEBUG netavark::network::bridge] Container interface name: eth0 with IP addresses [10.89.0.2/24]\n [DEBUG netavark::network::bridge] Bridge name: podman1 with IP addresses [10.89.0.1/24]\n [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.ip_forward to 1\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/podman1/rp_filter to 2\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created root filesystem for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge\"\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv6/conf/eth0/autoconf to 0\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/arp_notify to 1\n [DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/rp_filter to 2\n [INFO netavark::network::netlink] Adding route (dest: 0.0.0.0/0 ,gw: 10.89.0.1, metric 100)\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-4B9D9135B29BA created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_2 created on table filter\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_3 created on table filter\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_INPUT created on table filter\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_FORWARD created on table filter\n [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -j ACCEPT created on table nat and chain NETAVARK-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule ! 
-d 224.0.0.0/4 -j MASQUERADE created on table nat and chain NETAVARK-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j NETAVARK-4B9D9135B29BA created on table nat and chain POSTROUTING\n [DEBUG netavark::firewall::varktables::helpers] rule -p udp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT\n [DEBUG netavark::firewall::varktables::helpers] rule -p tcp -s 10.89.0.0/24 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT\n [DEBUG netavark::firewall::varktables::helpers] rule -m conntrack --ctstate INVALID -j DROP created on table filter and chain NETAVARK_FORWARD\n [DEBUG netavark::firewall::varktables::helpers] rule -d 10.89.0.0/24 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT created on table filter and chain NETAVARK_FORWARD\n [DEBUG netavark::firewall::varktables::helpers] rule -s 10.89.0.0/24 -j ACCEPT created on table filter and chain NETAVARK_FORWARD\n [DEBUG netavark::firewall::firewalld] Adding firewalld rules for network 10.89.0.0/24\n [DEBUG netavark::firewall::firewalld] Adding subnet 10.89.0.0/24 to zone trusted as source\n [DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.conf.podman1.route_localnet to 1\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-SETMARK created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-MASQ created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-DN-4B9D9135B29BA created on table nat\n [DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-DNAT created on table nat\n [DEBUG netavark::firewall::varktables::helpers] rule -j MARK --set-xmark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-SETMARK\n [DEBUG netavark::firewall::varktables::helpers] rule -j MASQUERADE -m comment --comment 'netavark portfw masq mark' -m mark --mark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-MASQ\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 10.89.0.0/24 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-SETMARK -s 127.0.0.1 -p tcp --dport 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -j DNAT -p tcp --to-destination 10.89.0.2:80 --destination-port 15002 created on table nat and chain NETAVARK-DN-4B9D9135B29BA\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-DN-4B9D9135B29BA -p tcp --dport 15002 -m comment --comment 'dnat name: podman-default-kube-network id: b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2' created on table nat and chain NETAVARK-HOSTPORT-DNAT\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain PREROUTING\n [DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain OUTPUT\n [DEBUG netavark::dns::aardvark] Spawning aardvark server\n [DEBUG netavark::dns::aardvark] start aardvark-dns: [\"systemd-run\", \"-q\", \"--scope\", \"/usr/libexec/podman/aardvark-dns\", \"--config\", \"/run/containers/networks/aardvark-dns\", \"-p\", \"53\", \"run\"]\n [DEBUG netavark::commands::setup] {\n \"podman-default-kube-network\": StatusBlock {\n dns_search_domains: Some(\n [\n \"dns.podman\",\n ],\n 
),\n dns_server_ips: Some(\n [\n 10.89.0.1,\n ],\n ),\n interfaces: Some(\n {\n \"eth0\": NetInterface {\n mac_address: \"ce:5c:7c:33:0d:65\",\n subnets: Some(\n [\n NetAddress {\n gateway: Some(\n 10.89.0.1,\n ),\n ipnet: 10.89.0.2/24,\n },\n ],\n ),\n },\n },\n ),\n },\n }\n [DEBUG netavark::commands::setup] Setup complete\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Setting Cgroups for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"reading hooks from /usr/share/containers/oci/hooks.d\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Workdir \\\"/\\\" resolved to host path \\\"/var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/rootfs/merge\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created OCI spec for container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 at /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/config.json\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/usr/bin/conmon messages will be logged to syslog\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"running conmon: /usr/bin/conmon\" args=\"[--api-version 1 -c b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -u b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata -p /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/pidfile -n a89535868ec0-infra --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath 
--exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2]\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Received: 31620\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Got Conmon PID as 31618\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 in OCI runtime\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding nameserver(s) from network status of '[\\\"10.89.0.1\\\"]'\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Adding search domain(s) from network status of '[\\\"dns.podman\\\"]'\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Starting container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 with command [/catatonit -P]\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Started container b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"overlay: mount_data=lowerdir=/var/lib/containers/storage/overlay/l/BSXLDW6S4QQFLDJH6Z45ODLX6A,upperdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/diff,workdir=/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/work,nodev,metacopy=on,context=\\\"system_u:object_r:container_file_t:s0:c263,c753\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Mounted container \\\"f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\\\" at \\\"/var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged\\\"\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created root filesystem for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay/031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26/merged\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/proc/sys/crypto/fips_enabled does not contain '1', not adding FIPS mode bind mounts\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Setting Cgroups for container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 to machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice:libpod:f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"reading hooks from /usr/share/containers/oci/hooks.d\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Workdir \\\"/var/www\\\" resolved to a volume or mount\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created OCI spec for container 
f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 at /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/config.json\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice for parent machine.slice and name libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"/usr/bin/conmon messages will be logged to syslog\"\n time=\"2025-07-07T20:13:36-04:00\" level=debug msg=\"running conmon: /usr/bin/conmon\" args=\"[--api-version 1 -c f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -u f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata -p /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/pidfile -n httpd2-httpd2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06]\"\n time=\"2025-07-07T20:13:36-04:00\" level=info msg=\"Running conmon under slice machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice and unitName libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Received: 31625\"\n time=\"2025-07-07T20:13:37-04:00\" level=info msg=\"Got Conmon PID as 31623\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Created container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 in OCI runtime\"\n 
time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Starting container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 with command [/bin/busybox-extras httpd -f -p 80]\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Started container f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Called kube.PersistentPostRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)\"\n time=\"2025-07-07T20:13:37-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:37 managed-node2 python3.9[31496]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0\nJul 07 20:13:37 managed-node2 python3.9[31775]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:13:37 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:37 managed-node2 systemd-rc-local-generator[31792]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:38 managed-node2 python3.9[31958]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None\nJul 07 20:13:38 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:38 managed-node2 systemd-rc-local-generator[31978]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:39 managed-node2 python3.9[32143]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:13:39 managed-node2 systemd[1]: Created slice Slice /system/podman-kube.\n\u2591\u2591 Subject: A start job for unit system-podman\\x2dkube.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit system-podman\\x2dkube.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1479.\nJul 07 20:13:39 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1478.\nJul 07 20:13:39 managed-node2 podman[32147]: 2025-07-07 20:13:39.145431853 -0400 EDT m=+0.025527029 pod stop a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:46 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: time=\"2025-07-07T20:13:49-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:13:49 
managed-node2 systemd[1]: libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : container 31625 exited with status 137\nJul 07 20:13:49 managed-node2 conmon[31623]: conmon f451c929f398dfcc2303 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice/libpod-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope/container/memory.events\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.174232474 -0400 EDT m=+10.054327720 container died f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Setting custom database backend: \\\"sqlite\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Using sqlite as database backend\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph driver overlay\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using transient store: false\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"[graphdriver] trying provided driver 
\\\"overlay\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Initializing event backend journald\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: 
time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Setting parallel job count to 7\"\nJul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-031fa2927da1e19a93d45dbb8f74acff9ff44a2167616de52d9d6bf7a2e6be26-merged.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.219658211 -0400 EDT m=+10.099753380 container cleanup f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test)\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32158]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=32158\nJul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.241852628 -0400 EDT m=+10.121948152 container died b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra)\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Received SIGHUP\nJul 07 20:13:49 
managed-node2 systemd[1]: run-rf8a9b32703c44fe9919a21200707a783.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-rf8a9b32703c44fe9919a21200707a783.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Successfully parsed config\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v4 ip {}\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: Listen v6 ip {}\nJul 07 20:13:49 managed-node2 aardvark-dns[31614]: No configuration found stopping the sever\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:13:49 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPreRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Setting custom database backend: \\\"sqlite\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Using sqlite as database backend\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph driver overlay\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using transient store: false\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: 
time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Initializing event backend journald\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Setting parallel job count to 7\"\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.2949] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')\nJul 07 
20:13:49 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1551.\nJul 07 20:13:49 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1551.\nJul 07 20:13:49 managed-node2 systemd[1]: run-netns-netns\\x2d3dcd885d\\x2d1b51\\x2d2e38\\x2d72ff\\x2d33596f02c329.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2d3dcd885d\\x2d1b51\\x2d2e38\\x2d72ff\\x2d33596f02c329.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.470731214 -0400 EDT m=+10.350826660 container cleanup b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Called cleanup.PersistentPostRunE(/usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level debug --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald --syslog container cleanup --stopped-only b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2)\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:13:49 managed-node2 /usr/bin/podman[32170]: time=\"2025-07-07T20:13:49-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=32170\nJul 07 20:13:49 managed-node2 systemd[1]: Stopping libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope...\n\u2591\u2591 Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit 
libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1618.\nJul 07 20:13:49 managed-node2 systemd[1]: libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has successfully entered the 'dead' state.\nJul 07 20:13:49 managed-node2 systemd[1]: Stopped libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope.\n\u2591\u2591 Subject: A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit libpod-conmon-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2.scope has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1618 and the job result is done.\nJul 07 20:13:49 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1617 and the job result is done.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.543096205 -0400 EDT m=+10.423191413 container remove f451c929f398dfcc2303f43b65fc812d1e0a50f25198322fc88ffc0a3004db06 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.569252202 -0400 EDT m=+10.449347410 container remove b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2 (image=, name=a89535868ec0-infra, pod_id=a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5)\nJul 07 20:13:49 managed-node2 systemd[1]: machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: Failed to open /run/systemd/transient/machine-libpod_pod_a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5.slice: No such file or directory\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577427834 -0400 EDT m=+10.457523002 pod remove a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5 (image=, name=httpd2)\nJul 07 20:13:49 managed-node2 podman[32147]: Pods stopped:\nJul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\nJul 07 20:13:49 managed-node2 podman[32147]: Pods removed:\nJul 07 20:13:49 managed-node2 podman[32147]: a89535868ec016696f34385295b0e393df118e3a9f14de054a642482340949f5\nJul 07 20:13:49 managed-node2 podman[32147]: Secrets removed:\nJul 07 20:13:49 managed-node2 podman[32147]: Volumes 
removed:\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.577733569 -0400 EDT m=+10.457828934 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.601543415 -0400 EDT m=+10.481638618 container create 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1619.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.643353432 -0400 EDT m=+10.523448629 container create 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.649232898 -0400 EDT m=+10.529328325 pod create d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675477572 -0400 EDT m=+10.555572747 container create 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, created_at=2021-06-10T18:55:36Z)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.675792512 -0400 EDT m=+10.555887719 container restart 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.651020296 -0400 EDT m=+10.531115614 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1623.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.731153306 -0400 EDT m=+10.611248537 container init 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 
(image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.734097189 -0400 EDT m=+10.614192506 container start 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7517] manager: (podman1): new Bridge device (/org/freedesktop/NetworkManager/Devices/5)\nJul 07 20:13:49 managed-node2 systemd-udevd[32184]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:13:49 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:13:49 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:13:49 managed-node2 systemd-udevd[32189]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7610] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/6)\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered blocking state\nJul 07 20:13:49 managed-node2 kernel: podman1: port 1(veth0) entered forwarding state\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7658] device (veth0): carrier: link connected\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7662] device (podman1): carrier: link connected\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7742] device (podman1): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7747] device (podman1): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7759] device (podman1): Activation: starting connection 'podman1' (9a09baee-577d-45df-991f-e577871fe999)\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7761] device (podman1): state change: disconnected -> prepare (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7766] device (podman1): state change: prepare -> config (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7769] device (podman1): state change: config -> ip-config (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7773] device (podman1): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7875] device (podman1): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7879] device (podman1): state change: secondaries -> activated (reason 'none', managed-type: 'external')\nJul 07 20:13:49 managed-node2 NetworkManager[644]: [1751933629.7899] device (podman1): Activation: successful, device activated.\nJul 07 20:13:49 managed-node2 systemd[1]: Started /usr/libexec/podman/aardvark-dns --config /run/containers/networks/aardvark-dns -p 53 run.\n\u2591\u2591 Subject: A start job for unit 
run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1627.\nJul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1631.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.929395798 -0400 EDT m=+10.809491054 container init 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.932349651 -0400 EDT m=+10.812444917 container start 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:13:49 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1636.\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.975052109 -0400 EDT m=+10.855147428 container init 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.978227355 -0400 EDT m=+10.858322532 container start 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:13:49 managed-node2 podman[32147]: 2025-07-07 20:13:49.984086055 -0400 EDT m=+10.864181258 pod start d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:13:49 managed-node2 systemd[1]: Started A template for running K8s workloads via 
podman-kube-play.\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1478.\nJul 07 20:13:49 managed-node2 podman[32147]: Pod:\nJul 07 20:13:49 managed-node2 podman[32147]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0\nJul 07 20:13:49 managed-node2 podman[32147]: Container:\nJul 07 20:13:49 managed-node2 podman[32147]: 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5\nJul 07 20:13:50 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-b54360e34ffcfca4fbf3843a5b6f3c2a2bf90b7a32014d5a23b66a9ea74b94a2-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:13:51 managed-node2 python3.9[32468]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:52 managed-node2 python3.9[32619]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:13:53 managed-node2 python3.9[32769]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:53 managed-node2 python3.9[32918]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:54 managed-node2 podman[33098]: 2025-07-07 20:13:54.593475273 -0400 EDT m=+0.280680463 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:55 managed-node2 python3.9[33262]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:13:55 managed-node2 python3.9[33411]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None 
modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:55 managed-node2 python3.9[33560]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:13:56 managed-node2 python3.9[33680]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/httpd3.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1751933635.6006646-13961-107071336953239/.source.yml _original_basename=._73jk67j follow=False checksum=5b3685de46cacb0a0661419a5a5898cbb3cf431c backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:13:56 managed-node2 python3.9[33829]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.647468979 -0400 EDT m=+0.017382508 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:13:56 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1641.\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.687774841 -0400 EDT m=+0.057688390 container create f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.693778889 -0400 EDT m=+0.063692415 pod create 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.718042148 -0400 EDT m=+0.087955671 container create d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry)\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:13:56 managed-node2 kernel: veth1: entered allmulticast mode\nJul 07 
20:13:56 managed-node2 kernel: veth1: entered promiscuous mode\nJul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7464] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/7)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.695533824 -0400 EDT m=+0.065447531 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:13:56 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state\nJul 07 20:13:56 managed-node2 NetworkManager[644]: [1751933636.7489] device (veth1): carrier: link connected\nJul 07 20:13:56 managed-node2 systemd-udevd[33860]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope.\n\u2591\u2591 Subject: A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1646.\nJul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1651.\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.874054258 -0400 EDT m=+0.243967914 container init f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.877423946 -0400 EDT m=+0.247337594 container start f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:13:56 managed-node2 systemd[1]: Started libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope.\n\u2591\u2591 Subject: A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1656.\nJul 07 20:13:56 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit 
libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1661.\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.931183676 -0400 EDT m=+0.301097308 container init d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.93420031 -0400 EDT m=+0.304113971 container start d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test)\nJul 07 20:13:56 managed-node2 podman[33836]: 2025-07-07 20:13:56.940215927 -0400 EDT m=+0.310129474 pod start 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:13:57 managed-node2 python3.9[34065]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:13:57 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:57 managed-node2 systemd-rc-local-generator[34083]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:58 managed-node2 python3.9[34248]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system enabled=True daemon_reload=False daemon_reexec=False no_block=False state=None force=None masked=None\nJul 07 20:13:58 managed-node2 systemd[1]: Reloading.\nJul 07 20:13:58 managed-node2 systemd-rc-local-generator[34268]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:13:59 managed-node2 python3.9[34433]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:13:59 managed-node2 systemd[1]: Starting A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1666.\nJul 07 20:13:59 managed-node2 podman[34437]: 2025-07-07 20:13:59.128818064 -0400 EDT m=+0.031043200 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:13:59 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: time=\"2025-07-07T20:14:09-04:00\" 
level=warning msg=\"StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.161891677 -0400 EDT m=+10.064117231 container died d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, app=test, created_at=2021-06-10T18:55:36Z)\nJul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-ea9de557ba623f700a03785c93f2fae562cdde6abc47bc4578532dd100d74f80-merged.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.208209429 -0400 EDT m=+10.110434520 container cleanup d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.228424072 -0400 EDT m=+10.130649401 container died f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra)\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left allmulticast mode\nJul 07 20:14:09 managed-node2 kernel: veth1 (unregistering): left promiscuous mode\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:14:09 managed-node2 systemd[1]: run-netns-netns\\x2db10132db\\x2d5af1\\x2d0f8c\\x2d38ab\\x2d1e8eaa97e6f2.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 
Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2db10132db\\x2d5af1\\x2d0f8c\\x2d38ab\\x2d1e8eaa97e6f2.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.330607359 -0400 EDT m=+10.232832448 container cleanup f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:14:09 managed-node2 systemd[1]: libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has successfully entered the 'dead' state.\nJul 07 20:14:09 managed-node2 systemd[1]: Stopped libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope.\n\u2591\u2591 Subject: A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit libpod-conmon-f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b.scope has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1740 and the job result is done.\nJul 07 20:14:09 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1739 and the job result is done.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.33777977 -0400 EDT m=+10.240004889 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 systemd[1]: 
machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.341042664 -0400 EDT m=+10.243267751 pod stop 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.366974147 -0400 EDT m=+10.269199273 container remove d74dcb0feb4b2f7367b3593b2b1d500c6ff791217fda954825ae84295d4ffb45 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.392885411 -0400 EDT m=+10.295110535 container remove f3c621d4a012f9702f3df80e4263fb77e28ae936d038a9e9e8920a7aa139304b (image=, name=06637f87acc7-infra, pod_id=06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7)\nJul 07 20:14:09 managed-node2 systemd[1]: machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: Failed to open /run/systemd/transient/machine-libpod_pod_06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7.slice: No such file or directory\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.400728494 -0400 EDT m=+10.302953580 pod remove 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 podman[34437]: Pods stopped:\nJul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7\nJul 07 20:14:09 managed-node2 podman[34437]: Pods removed:\nJul 07 20:14:09 managed-node2 podman[34437]: 06637f87acc7b2b91df9422641042c9bc8ef95a8d2efb39c17ef4dcdaae39bf7\nJul 07 20:14:09 managed-node2 podman[34437]: Secrets removed:\nJul 07 20:14:09 managed-node2 podman[34437]: Volumes removed:\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.40084298 -0400 EDT m=+10.303068230 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.421632285 -0400 EDT m=+10.323857401 container create eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished 
successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1741.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.456874206 -0400 EDT m=+10.359099322 container create e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.463729321 -0400 EDT m=+10.365954523 pod create 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.466690532 -0400 EDT m=+10.368915893 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495205758 -0400 EDT m=+10.397430876 container create 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_by=test/system/build-testimage, io.buildah.version=1.21.0, app=test, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, created_at=2021-06-10T18:55:36Z)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.495540708 -0400 EDT m=+10.397765831 container restart eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1745.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.541938993 -0400 EDT m=+10.444164111 container init eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.54556115 -0400 EDT m=+10.447786446 container start eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:14:09 managed-node2 kernel: veth1: entered allmulticast mode\nJul 07 20:14:09 managed-node2 kernel: veth1: entered promiscuous mode\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered blocking state\nJul 07 20:14:09 managed-node2 kernel: podman1: port 2(veth1) entered forwarding state\nJul 07 20:14:09 managed-node2 NetworkManager[644]: [1751933649.5632] manager: (veth1): new Veth device (/org/freedesktop/NetworkManager/Devices/8)\nJul 07 20:14:09 managed-node2 
NetworkManager[644]: [1751933649.5684] device (veth1): carrier: link connected\nJul 07 20:14:09 managed-node2 systemd-udevd[34477]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1749.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.678444807 -0400 EDT m=+10.580670010 container init e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.681382862 -0400 EDT m=+10.583608134 container start e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:14:09 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1754.\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.728795591 -0400 EDT m=+10.631020730 container init 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, app=test)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.73177019 -0400 EDT m=+10.633995385 container start 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage)\nJul 07 20:14:09 managed-node2 podman[34437]: 2025-07-07 20:14:09.737566302 -0400 EDT m=+10.639791423 pod start 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:14:09 managed-node2 podman[34437]: Pod:\nJul 07 20:14:09 managed-node2 podman[34437]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36\nJul 07 20:14:09 managed-node2 podman[34437]: Container:\nJul 07 20:14:09 
managed-node2 podman[34437]: 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109\nJul 07 20:14:09 managed-node2 systemd[1]: Started A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1666.\nJul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:10 managed-node2 sudo[34704]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-cflwckudrftyvniytbtokrawwncefyyk ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933650.260143-14379-210639111977054/AnsiballZ_command.py'\nJul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:10 managed-node2 python3.9[34706]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:10 managed-node2 systemd[27808]: Started podman-34715.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 110.\nJul 07 20:14:10 managed-node2 sudo[34704]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:10 managed-node2 python3.9[34872]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:11 managed-node2 python3.9[35029]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:11 managed-node2 sudo[35186]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-utoemynxahtksrgkxmppktxcnibfjzhy ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933651.5642703-14440-50388675366557/AnsiballZ_command.py'\nJul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:11 managed-node2 python3.9[35188]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None 
removes=None stdin=None\nJul 07 20:14:11 managed-node2 sudo[35186]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:12 managed-node2 python3.9[35340]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:12 managed-node2 python3.9[35492]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:13 managed-node2 python3.9[35644]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:13 managed-node2 python3.9[35795]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:14 managed-node2 python3.9[35945]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd1-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:14 managed-node2 python3.9[36095]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd2-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:14 managed-node2 python3.9[36245]: ansible-ansible.legacy.command Invoked with _raw_params=ls -alrtF /tmp/lsr_8xkyz6d8_podman/httpd3-create _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:17 managed-node2 python3.9[36544]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True 
strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:18 managed-node2 python3.9[36699]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:21 managed-node2 python3.9[36850]: ansible-ansible.legacy.dnf Invoked with name=['firewalld'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:23 managed-node2 python3.9[37000]: ansible-systemd Invoked with name=firewalld masked=False daemon_reload=False daemon_reexec=False scope=system no_block=False state=None enabled=None force=None\nJul 07 20:14:23 managed-node2 python3.9[37151]: ansible-ansible.legacy.systemd Invoked with name=firewalld enabled=True state=started daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None\nJul 07 20:14:24 managed-node2 python3.9[37302]: ansible-fedora.linux_system_roles.firewall_lib Invoked with port=['15001-15003/tcp'] permanent=True runtime=True state=enabled __report_changed=True online=True service=[] source_port=[] forward_port=[] rich_rule=[] source=[] interface=[] interface_pci_id=[] icmp_block=[] timeout=0 ipset_entries=[] protocol=[] helper_module=[] destination=[] includes=[] firewalld_conf=None masquerade=None icmp_block_inversion=None target=None zone=None set_default_zone=None ipset=None ipset_type=None description=None short=None\nJul 07 20:14:26 managed-node2 python3.9[37451]: ansible-ansible.legacy.dnf Invoked with name=['python3-libselinux', 'python3-policycoreutils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:27 managed-node2 python3.9[37601]: ansible-ansible.legacy.dnf Invoked with name=['grubby'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:29 managed-node2 python3.9[37751]: ansible-ansible.legacy.dnf Invoked with name=['policycoreutils-python-utils'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] 
installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None\nJul 07 20:14:30 managed-node2 python3.9[37901]: ansible-setup Invoked with filter=['ansible_selinux'] gather_subset=['all'] gather_timeout=10 fact_path=/etc/ansible/facts.d\nJul 07 20:14:32 managed-node2 python3.9[38089]: ansible-fedora.linux_system_roles.local_seport Invoked with ports=['15001-15003'] proto=tcp setype=http_port_t state=present local=False ignore_selinux_state=False reload=True\nJul 07 20:14:33 managed-node2 python3.9[38238]: ansible-fedora.linux_system_roles.selinux_modules_facts Invoked\nJul 07 20:14:37 managed-node2 python3.9[38387]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None\nJul 07 20:14:38 managed-node2 python3.9[38537]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:38 managed-node2 python3.9[38688]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:39 managed-node2 python3.9[38838]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:40 managed-node2 python3.9[38988]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:40 managed-node2 python3.9[39138]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl enable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None\nJul 07 20:14:41 managed-node2 python3.9[39287]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1 state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:41 managed-node2 python3.9[39436]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd1-create state=directory owner=podman_basic_user group=3001 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:42 managed-node2 
sudo[39585]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dteqtauzgahpdwlqmxqoqvigwdlcbwgx ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933682.0866897-15498-43758369682888/AnsiballZ_podman_image.py'\nJul 07 20:14:42 managed-node2 sudo[39585]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39588.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 114.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39596.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 118.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39604.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 122.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39611.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 126.\nJul 07 20:14:42 managed-node2 systemd[27808]: Started podman-39618.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 130.\nJul 07 20:14:43 managed-node2 systemd[27808]: Started podman-39626.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 134.\nJul 07 20:14:43 managed-node2 sudo[39585]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:43 managed-node2 python3.9[39782]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:43 managed-node2 python3.9[39933]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d state=directory owner=podman_basic_user group=3001 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:44 
managed-node2 python3.9[40082]: ansible-ansible.legacy.stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:14:44 managed-node2 python3.9[40157]: ansible-ansible.legacy.file Invoked with owner=podman_basic_user group=3001 mode=0644 dest=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _original_basename=.3ieew216 recurse=False state=file path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:14:44 managed-node2 sudo[40306]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ustvjrkhcohlxhhxarkriiiprsdnnhal ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933684.6664407-15601-193248781752908/AnsiballZ_podman_play.py'\nJul 07 20:14:44 managed-node2 sudo[40306]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:14:44 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:14:45 managed-node2 systemd[27808]: Started podman-40315.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 138.\nJul 07 20:14:45 managed-node2 systemd[27808]: Created slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 142.\nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman play kube --start=true --log-level=debug /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml\nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: \nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"/bin/podman filtering at log level debug\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Called kube.PersistentPreRunE(/bin/podman play kube --start=true --log-level=debug 
/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml)\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\n time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"Using sqlite as database backend\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"systemd-logind: Unknown object '/'.\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using graph driver overlay\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using graph root /home/podman_basic_user/.local/share/containers/storage\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using run root /run/user/3001/containers\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using static dir /home/podman_basic_user/.local/share/containers/storage/libpod\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using tmp dir /run/user/3001/libpod/tmp\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using volume path /home/podman_basic_user/.local/share/containers/storage/volumes\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using transient store: false\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that metacopy is not being used\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Cached value indicated that native-diff is usable\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=true, usingMetacopy=false\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Initializing event backend file\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Using OCI runtime 
\\\"/usr/bin/crun\\\"\"\n time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"Setting parallel job count to 7\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Successfully loaded network podman-default-kube-network: &{podman-default-kube-network f726a0dfc720eef9b785c3acdef2ddc0ef169e999e9185270f7b5fdceae44256 bridge podman1 2025-07-07 20:13:16.261934543 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Successfully loaded 2 networks\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Pod using bridge network mode\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Created cgroup path user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice for parent user.slice and name libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Created cgroup user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice\"\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Got pod cgroup as user.slice/user-3001.slice/user@3001.service/user.slice/user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice\"\n Error: adding pod to state: name \"httpd1\" is in use: pod already exists\n time=\"2025-07-07T20:14:45-04:00\" level=debug msg=\"Shutting down engines\"\n time=\"2025-07-07T20:14:45-04:00\" level=info msg=\"Received shutdown.Stop(), terminating!\" PID=40315\nJul 07 20:14:45 managed-node2 python3.9[40308]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125\nJul 07 20:14:45 managed-node2 sudo[40306]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:14:46 managed-node2 python3.9[40471]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:14:46 managed-node2 python3.9[40621]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:47 managed-node2 python3.9[40772]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:49 managed-node2 python3.9[40922]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:49 managed-node2 python3.9[41071]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd2-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:50 managed-node2 podman[41251]: 2025-07-07 20:14:50.30172649 -0400 EDT m=+0.335741630 image pull 
9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:14:50 managed-node2 python3.9[41415]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:51 managed-node2 python3.9[41566]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:51 managed-node2 python3.9[41715]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:14:51 managed-node2 python3.9[41790]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd2.yml _original_basename=.7tnd0tsm recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd2.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play Invoked with state=started debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:14:52 managed-node2 podman[41946]: 2025-07-07 20:14:52.281267633 -0400 EDT m=+0.019255481 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:14:52 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1759.\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: \nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"/usr/bin/podman filtering at log level debug\"\n time=\"2025-07-07T20:14:52-04:00\" 
level=debug msg=\"Called kube.PersistentPreRunE(/usr/bin/podman play kube --start=true --log-level=debug /etc/containers/ansible-kubernetes.d/httpd2.yml)\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using conmon: \\\"/usr/bin/conmon\\\"\"\n time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"Using sqlite as database backend\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using graph driver overlay\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using graph root /var/lib/containers/storage\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using run root /run/containers/storage\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using static dir /var/lib/containers/storage/libpod\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using tmp dir /run/libpod\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using volume path /var/lib/containers/storage/volumes\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using transient store: false\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"[graphdriver] trying provided driver \\\"overlay\\\"\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that overlay is supported\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that metacopy is being used\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Cached value indicated that native-diff is not being used\"\n time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"backingFs=xfs, projectQuotaSupported=false, useNativeDiff=false, usingMetacopy=true\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Initializing event backend journald\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Configured OCI runtime crun-wasm initialization failed: no valid executable found for OCI runtime crun-wasm: 
invalid argument\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Using OCI runtime \\\"/usr/bin/crun\\\"\"\n time=\"2025-07-07T20:14:52-04:00\" level=info msg=\"Setting parallel job count to 7\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Successfully loaded network podman-default-kube-network: &{podman-default-kube-network 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c bridge podman1 2025-07-07 20:11:21.084048926 -0400 EDT [{{{10.89.0.0 ffffff00}} 10.89.0.1 }] [] false false true [] map[] map[] map[driver:host-local]}\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Successfully loaded 2 networks\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Pod using bridge network mode\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Created cgroup path machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice for parent machine.slice and name libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Created cgroup machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice\"\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Got pod cgroup as machine.slice/machine-libpod_pod_55cf40f422ab8478e9c4da3bb3e4a8e5c2ce02f1f51e3c1d7ff9913ae8d6b45a.slice\"\n Error: adding pod to state: name \"httpd2\" is in use: pod already exists\n time=\"2025-07-07T20:14:52-04:00\" level=debug msg=\"Shutting down engines\"\nJul 07 20:14:52 managed-node2 python3.9[41939]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 125\nJul 07 20:14:53 managed-node2 python3.9[42102]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:55 managed-node2 python3.9[42253]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:14:56 managed-node2 python3.9[42403]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3 state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:56 managed-node2 python3.9[42552]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman/httpd3-create state=directory owner=root group=root recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:57 managed-node2 podman[42732]: 2025-07-07 20:14:57.595089727 -0400 EDT m=+0.334374931 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:14:57 managed-node2 python3.9[42895]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:14:58 
managed-node2 python3.9[43046]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:59 managed-node2 python3.9[43195]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:14:59 managed-node2 python3.9[43270]: ansible-ansible.legacy.file Invoked with owner=root group=0 mode=0644 dest=/etc/containers/ansible-kubernetes.d/httpd3.yml _original_basename=.fnfhf1h4 recurse=False state=file path=/etc/containers/ansible-kubernetes.d/httpd3.yml force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:14:59 managed-node2 python3.9[43419]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:14:59 managed-node2 podman[43426]: 2025-07-07 20:14:59.794215832 -0400 EDT m=+0.017981924 network create 0a842076c75338ed23c6283f120afac661325ee3d3188d4246c57fcbd0ff921c (name=podman-default-kube-network, type=bridge)\nJul 07 20:14:59 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit machine-libpod_pod_a956533ce71c546925cb35266c34fb208b1e49cd00e4934b1886b8ae13aea530.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1763.\nJul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:00 managed-node2 sudo[43582]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-copmprovbtozwjdqvrxslhkmftgtigcs ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933700.520565-16475-105270240946276/AnsiballZ_command.py'\nJul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:00 managed-node2 python3.9[43584]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd1 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:00 managed-node2 systemd[27808]: Started 
podman-43592.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 146.\nJul 07 20:15:00 managed-node2 sudo[43582]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:01 managed-node2 python3.9[43750]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd2 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:01 managed-node2 python3.9[43908]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect httpd3 --format '{{.State}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:02 managed-node2 sudo[44065]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-omhmgepybavqbezpokrriumisrazocox ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933701.8765578-16521-15111276780862/AnsiballZ_command.py'\nJul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:02 managed-node2 python3.9[44067]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:02 managed-node2 sudo[44065]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:02 managed-node2 python3.9[44219]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:02 managed-node2 python3.9[44371]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:03 managed-node2 python3.9[44523]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15001/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None 
unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:03 managed-node2 python3.9[44673]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15002/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:04 managed-node2 python3.9[44823]: ansible-ansible.legacy.uri Invoked with url=http://localhost:15003/index.txt return_content=True force=False http_agent=ansible-httpget use_proxy=True validate_certs=True force_basic_auth=False use_gssapi=False body_format=raw method=GET follow_redirects=safe status_code=[200] timeout=30 headers={} remote_src=False unredirected_headers=[] decompress=True use_netrc=True unsafe_writes=False url_username=None url_password=NOT_LOGGING_PARAMETER client_cert=None client_key=None dest=None body=None src=None creates=None removes=None unix_socket=None ca_path=None ciphers=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:07 managed-node2 python3.9[45122]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:08 managed-node2 python3.9[45277]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:11 managed-node2 python3.9[45428]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None\nJul 07 20:15:12 managed-node2 python3.9[45578]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:12 managed-node2 python3.9[45729]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:12 managed-node2 python3.9[45879]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:14 managed-node2 python3.9[46029]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:14 managed-node2 python3.9[46179]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True 
checksum_algorithm=sha1\nJul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:15 managed-node2 sudo[46330]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-yrlqohfxmrbjsiwrdleklbogdfrytzax ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933715.029206-17136-162272482862195/AnsiballZ_systemd.py'\nJul 07 20:15:15 managed-node2 sudo[46330]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:15 managed-node2 python3.9[46332]: ansible-systemd Invoked with name=podman-kube@-home-podman_basic_user-.config-containers-ansible\\x2dkubernetes.d-httpd1.yml.service scope=user state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:15:15 managed-node2 systemd[27808]: Reloading.\nJul 07 20:15:15 managed-node2 systemd[27808]: Stopping A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 150.\nJul 07 20:15:25 managed-node2 podman[46347]: time=\"2025-07-07T20:15:25-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd1-httpd1 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:15:25 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:15:25 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:25 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 151 and the job result is done.\nJul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory\nJul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory\nJul 07 20:15:25 managed-node2 systemd[27808]: user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: Failed to open /run/user/3001/systemd/transient/user-libpod_pod_a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4.slice: No such file or directory\nJul 07 20:15:26 managed-node2 podman[46347]: Pods stopped:\nJul 07 20:15:26 managed-node2 podman[46347]: a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4\nJul 07 20:15:26 managed-node2 podman[46347]: Pods removed:\nJul 07 20:15:26 managed-node2 podman[46347]: 
a8c7970eab7195c1f6b985b7266a4cb6839e42f0db7c4c5ffb5b672c0220ecf4\nJul 07 20:15:26 managed-node2 podman[46347]: Secrets removed:\nJul 07 20:15:26 managed-node2 podman[46347]: Volumes removed:\nJul 07 20:15:26 managed-node2 systemd[27808]: Stopped A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 150 and the job result is done.\nJul 07 20:15:26 managed-node2 sudo[46330]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:26 managed-node2 python3.9[46572]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:15:26 managed-node2 sudo[46723]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ngscqobpoontmmgbeazhbfnhtlrerjma ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933726.6496177-17450-135897393375501/AnsiballZ_podman_play.py'\nJul 07 20:15:26 managed-node2 sudo[46723]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:15:26 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play version: 5.5.1, kube file /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml\nJul 07 20:15:27 managed-node2 systemd[27808]: Started podman-46732.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 152.\nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /bin/podman kube play --down /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml\nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped:\n Pods removed:\n Secrets removed:\n Volumes removed:\nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: \nJul 07 20:15:27 managed-node2 python3.9[46725]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0\nJul 07 20:15:27 managed-node2 sudo[46723]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:15:27 managed-node2 python3.9[46888]: ansible-file Invoked with 
path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:28 managed-node2 python3.9[47037]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:15:29 managed-node2 python3.9[47187]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:31 managed-node2 python3.9[47338]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:31 managed-node2 python3.9[47488]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:15:31 managed-node2 systemd[1]: Reloading.\nJul 07 20:15:31 managed-node2 systemd-rc-local-generator[47509]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:15:32 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1768.\nJul 07 20:15:32 managed-node2 podman[47527]: 2025-07-07 20:15:32.086748492 -0400 EDT m=+0.031435423 pod stop d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:15:42 managed-node2 podman[47527]: time=\"2025-07-07T20:15:42-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd2-httpd2 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:15:42 managed-node2 systemd[1]: libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5.scope has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.121044923 -0400 EDT m=+10.065732151 container died 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test)\nJul 07 20:15:42 managed-node2 systemd[1]: 
var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-6f2f0e89c245bbf36545733fa9225bf8ac05d0ba658f3773aea7623e3da19632-merged.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.165231828 -0400 EDT m=+10.109918731 container cleanup 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:15:42 managed-node2 systemd[1]: libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b.scope has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.176672676 -0400 EDT m=+10.121359827 container died 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left allmulticast mode\nJul 07 20:15:42 managed-node2 kernel: veth0 (unregistering): left promiscuous mode\nJul 07 20:15:42 managed-node2 kernel: podman1: port 1(veth0) entered disabled state\nJul 07 20:15:42 managed-node2 systemd[1]: run-netns-netns\\x2d2e00ab52\\x2d0e7b\\x2d94ee\\x2da345\\x2dec17caccc43b.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2d2e00ab52\\x2d0e7b\\x2d94ee\\x2da345\\x2dec17caccc43b.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit 
var-lib-containers-storage-overlay\\x2dcontainers-08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.276854989 -0400 EDT m=+10.221541921 container cleanup 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1770 and the job result is done.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.302687887 -0400 EDT m=+10.247374820 container remove 3f20d3a577b42f27af5fb5c166086489688c8b51cc076a43434b84ed200823d5 (image=quay.io/libpod/testimage:20210610, name=httpd2-httpd2, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry)\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.329086658 -0400 EDT m=+10.273773592 container remove 08b1e7b74fb7192425335d283ce7a160cb1676905dece8b8c2dadd2d44be0c4b (image=, name=d17e7ef2e094-infra, pod_id=d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 systemd[1]: machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: Failed to open /run/systemd/transient/machine-libpod_pod_d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0.slice: No such file or directory\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.337213217 -0400 EDT m=+10.281900117 pod remove d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0 (image=, name=httpd2)\nJul 07 20:15:42 managed-node2 systemd[1]: libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 conmon[32226]: conmon 78627b3638a40af7f868 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32.scope/container/memory.events\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.343750156 -0400 EDT m=+10.288437240 container kill 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, 
PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.349945997 -0400 EDT m=+10.294633088 container died 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 podman[47527]: 2025-07-07 20:15:42.409484257 -0400 EDT m=+10.354171379 container remove 78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32 (image=, name=91ecf1609ad1-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service)\nJul 07 20:15:42 managed-node2 podman[47527]: Pods stopped:\nJul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0\nJul 07 20:15:42 managed-node2 podman[47527]: Pods removed:\nJul 07 20:15:42 managed-node2 podman[47527]: d17e7ef2e094a5a7c7a16744b32eaee0685ed30cb49021916de1b210324f42b0\nJul 07 20:15:42 managed-node2 podman[47527]: Secrets removed:\nJul 07 20:15:42 managed-node2 podman[47527]: Volumes removed:\nJul 07 20:15:42 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has successfully entered the 'dead' state.\nJul 07 20:15:42 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1768 and the job result is done.\nJul 07 20:15:42 managed-node2 python3.9[47729]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-78627b3638a40af7f86842eb01978240c761523f65d93ec46b4a42cd494c4e32-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:43 managed-node2 python3.9[47880]: 
ansible-containers.podman.podman_play Invoked with state=absent debug=True log_level=debug kube_file=/etc/containers/ansible-kubernetes.d/httpd2.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None quiet=None recreate=None userns=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE command: /usr/bin/podman kube play --down /etc/containers/ansible-kubernetes.d/httpd2.yml\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stdout: Pods stopped:\n Pods removed:\n Secrets removed:\n Volumes removed:\nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE stderr: \nJul 07 20:15:43 managed-node2 python3.9[47880]: ansible-containers.podman.podman_play PODMAN-PLAY-KUBE rc: 0\nJul 07 20:15:43 managed-node2 python3.9[48043]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:15:45 managed-node2 python3.9[48192]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:46 managed-node2 python3.9[48343]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:15:47 managed-node2 python3.9[48493]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:15:47 managed-node2 systemd[1]: Reloading.\nJul 07 20:15:47 managed-node2 systemd-rc-local-generator[48513]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:15:47 managed-node2 systemd[1]: Stopping A template for running K8s workloads via podman-kube-play...\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1771.\nJul 07 20:15:47 managed-node2 podman[48533]: 2025-07-07 20:15:47.405787867 -0400 EDT m=+0.031643471 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 podman[48533]: 
time=\"2025-07-07T20:15:57-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container httpd3-httpd3 in 10 seconds, resorting to SIGKILL\"\nJul 07 20:15:57 managed-node2 systemd[1]: libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.434010239 -0400 EDT m=+10.059866007 container died 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-628129360f5470c8a5e4c9e68712c0420c79d4a01d22a8088c316ba43c268778-merged.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.478667416 -0400 EDT m=+10.104522986 container cleanup 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 systemd[1]: libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.500190016 -0400 EDT m=+10.126045733 container died e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: run-rce7152e4cf79441b86b3f3ed7d6f4283.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit 
run-rce7152e4cf79441b86b3f3ed7d6f4283.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left allmulticast mode\nJul 07 20:15:57 managed-node2 kernel: veth1 (unregistering): left promiscuous mode\nJul 07 20:15:57 managed-node2 kernel: podman1: port 2(veth1) entered disabled state\nJul 07 20:15:57 managed-node2 NetworkManager[644]: [1751933757.5423] device (podman1): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed')\nJul 07 20:15:57 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1773.\nJul 07 20:15:57 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1773.\nJul 07 20:15:57 managed-node2 systemd[1]: run-netns-netns\\x2db2b0269b\\x2d6f52\\x2d704b\\x2de0f2\\x2d936fd9832ebd.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-netns-netns\\x2db2b0269b\\x2d6f52\\x2d704b\\x2de0f2\\x2d936fd9832ebd.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.72272296 -0400 EDT m=+10.348578562 container cleanup e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: Removed slice cgroup machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice.\n\u2591\u2591 Subject: A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1839 and the job result is done.\nJul 07 20:15:57 managed-node2 podman[48533]: 
2025-07-07 20:15:57.730067956 -0400 EDT m=+10.355923539 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.736214931 -0400 EDT m=+10.362070507 pod stop 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.760284407 -0400 EDT m=+10.386140034 container remove 9ce8d8a70651d58c6ca07c9d44a209f96a26e91aae1398723c538551b3ab9109 (image=quay.io/libpod/testimage:20210610, name=httpd3-httpd3, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, app=test, created_at=2021-06-10T18:55:36Z, created_by=test/system/build-testimage, io.buildah.version=1.21.0, io.containers.autoupdate=registry, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.786340751 -0400 EDT m=+10.412196374 container remove e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641 (image=, name=0b34d5e9f949-infra, pod_id=0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 systemd[1]: machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: Failed to open /run/systemd/transient/machine-libpod_pod_0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36.slice: No such file or directory\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.794425826 -0400 EDT m=+10.420281396 pod remove 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36 (image=, name=httpd3)\nJul 07 20:15:57 managed-node2 systemd[1]: libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit libpod-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc.scope has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.797955293 -0400 EDT m=+10.423811069 container kill eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.805138661 -0400 EDT m=+10.430994476 container died eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: 2025-07-07 20:15:57.86757777 -0400 EDT m=+10.493433378 container remove 
eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc (image=, name=55c9b4d93968-service, PODMAN_SYSTEMD_UNIT=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service)\nJul 07 20:15:57 managed-node2 podman[48533]: Pods stopped:\nJul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36\nJul 07 20:15:57 managed-node2 podman[48533]: Pods removed:\nJul 07 20:15:57 managed-node2 podman[48533]: 0b34d5e9f9494aa603b1c70352acfbff43e81a73db53c069a58f9f12a0cb4a36\nJul 07 20:15:57 managed-node2 podman[48533]: Secrets removed:\nJul 07 20:15:57 managed-node2 podman[48533]: Volumes removed:\nJul 07 20:15:57 managed-node2 systemd[1]: podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has successfully entered the 'dead' state.\nJul 07 20:15:57 managed-node2 systemd[1]: Stopped A template for running K8s workloads via podman-kube-play.\n\u2591\u2591 Subject: A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1771 and the job result is done.\nJul 07 20:15:58 managed-node2 python3.9[48770]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-e56874574148131e1c7c967bc4a6038fff52c83242bee1a2b6e351266a906641-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-rootfs-merge.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit 
succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay\\x2dcontainers-eee21222f5f633aa1263625cbc6b00214e8245da04a134427b0d40f991ee24fc-userdata-shm.mount has successfully entered the 'dead' state.\nJul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play Invoked with state=absent kube_file=/etc/containers/ansible-kubernetes.d/httpd3.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None\nJul 07 20:15:58 managed-node2 python3.9[48921]: ansible-containers.podman.podman_play version: 5.5.1, kube file /etc/containers/ansible-kubernetes.d/httpd3.yml\nJul 07 20:15:58 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:15:59 managed-node2 python3.9[49083]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:00 managed-node2 python3.9[49232]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None\nJul 07 20:16:00 managed-node2 python3.9[49382]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:01 managed-node2 sudo[49533]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eyaoewgeadnpajdfsqkdmnspilftsmzm ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.058141-18477-143961090014844/AnsiballZ_podman_container_info.py'\nJul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:01 managed-node2 python3.9[49535]: ansible-containers.podman.podman_container_info Invoked with executable=podman name=None\nJul 07 20:16:01 managed-node2 systemd[27808]: Started podman-49536.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 156.\nJul 07 20:16:01 managed-node2 sudo[49533]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:01 managed-node2 
sudo[49691]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-csyhukllkdoqxtgiejqztpcafyureeyp ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933761.7158227-18498-173793406938955/AnsiballZ_command.py'\nJul 07 20:16:01 managed-node2 sudo[49691]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:01 managed-node2 python3.9[49693]: ansible-ansible.legacy.command Invoked with _raw_params=podman network ls -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49694.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 160.\nJul 07 20:16:02 managed-node2 sudo[49691]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:02 managed-node2 sudo[49850]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nugdaehmscgbqoulldhoxbffneulpeqi ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933762.1564903-18516-230631949404561/AnsiballZ_command.py'\nJul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:02 managed-node2 python3.9[49852]: ansible-ansible.legacy.command Invoked with _raw_params=podman secret ls -n -q _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:02 managed-node2 systemd[27808]: Started podman-49853.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 164.\nJul 07 20:16:02 managed-node2 sudo[49850]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:02 managed-node2 python3.9[50009]: ansible-ansible.legacy.command Invoked with removes=/var/lib/systemd/linger/podman_basic_user _raw_params=loginctl disable-linger podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None stdin=None\nJul 07 20:16:02 managed-node2 systemd[1]: Stopping User Manager for UID 3001...\n\u2591\u2591 Subject: A stop job for unit user@3001.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user@3001.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1840.\nJul 07 20:16:02 managed-node2 systemd[27808]: Activating special unit Exit the Session...\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopping podman-pause-7fbe17c5.scope...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: 
systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 181.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /app/podman-kube.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 183 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice cgroup user-libpod_pod_f60d38092e129513b51a0b2f07fc5347e46ea863cdd2a5b1d5752245e63e591c.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 180 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Main User Target.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 174 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Basic System.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 187 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Paths.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 185 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Sockets.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 173 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped target Timers.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 188 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped Mark boot as successful after the user session has run 2 minutes.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 186 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped Daily Cleanup of User's Temporary Directories.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: 
systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 172 and the job result is done.\nJul 07 20:16:02 managed-node2 dbus-broker[28296]: Dispatched 2118 messages @ 3(\u00b115)\u03bcs / message.\n\u2591\u2591 Subject: Dispatched 2118 messages\n\u2591\u2591 Defined-By: dbus-broker\n\u2591\u2591 Support: https://groups.google.com/forum/#!forum/bus1-devel\n\u2591\u2591 \n\u2591\u2591 This message is printed by dbus-broker when shutting down. It includes metric\n\u2591\u2591 information collected during the runtime of dbus-broker.\n\u2591\u2591 \n\u2591\u2591 The message lists the number of dispatched messages\n\u2591\u2591 (in this case 2118) as well as the mean time to\n\u2591\u2591 handling a single message. The time measurements exclude the time spent on\n\u2591\u2591 writing to and reading from the kernel.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopping D-Bus User Message Bus...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 171.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped Create User's Volatile Files and Directories.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 176 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped D-Bus User Message Bus.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 171 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Stopped podman-pause-7fbe17c5.scope.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 181 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice Slice /user.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 179 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Closed D-Bus User Message Bus Socket.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 175 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Removed slice User Application Slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has 
finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 184 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[27808]: Reached target Shutdown.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 170.\nJul 07 20:16:02 managed-node2 systemd[27808]: Finished Exit the Session.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 169.\nJul 07 20:16:02 managed-node2 systemd[27808]: Reached target Exit the Session.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 168.\nJul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user@3001.service has successfully entered the 'dead' state.\nJul 07 20:16:02 managed-node2 systemd[1]: Stopped User Manager for UID 3001.\n\u2591\u2591 Subject: A stop job for unit user@3001.service has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user@3001.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1840 and the job result is done.\nJul 07 20:16:02 managed-node2 systemd[1]: user@3001.service: Consumed 2.173s CPU time.\n\u2591\u2591 Subject: Resources consumed by unit runtime\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user@3001.service completed and consumed the indicated resources.\nJul 07 20:16:02 managed-node2 systemd[1]: Stopping User Runtime Directory /run/user/3001...\n\u2591\u2591 Subject: A stop job for unit user-runtime-dir@3001.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user-runtime-dir@3001.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1841.\nJul 07 20:16:03 managed-node2 systemd[1]: run-user-3001.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit run-user-3001.mount has successfully entered the 'dead' state.\nJul 07 20:16:03 managed-node2 systemd[1]: user-runtime-dir@3001.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user-runtime-dir@3001.service has successfully entered the 'dead' state.\nJul 07 20:16:03 managed-node2 systemd[1]: Stopped User Runtime Directory /run/user/3001.\n\u2591\u2591 Subject: A stop job for unit user-runtime-dir@3001.service 
has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user-runtime-dir@3001.service has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1841 and the job result is done.\nJul 07 20:16:03 managed-node2 systemd[1]: Removed slice User Slice of UID 3001.\n\u2591\u2591 Subject: A stop job for unit user-3001.slice has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit user-3001.slice has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1843 and the job result is done.\nJul 07 20:16:03 managed-node2 systemd[1]: user-3001.slice: Consumed 2.196s CPU time.\n\u2591\u2591 Subject: Resources consumed by unit runtime\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit user-3001.slice completed and consumed the indicated resources.\nJul 07 20:16:03 managed-node2 python3.9[50161]: ansible-ansible.legacy.command Invoked with _raw_params=loginctl show-user --value -p State podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:04 managed-node2 sudo[50311]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dpicetofkbhrscpezuhafexsvxxmiwru ; /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933764.0345392-18589-160568711821160/AnsiballZ_command.py'\nJul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:04 managed-node2 python3.9[50313]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd1 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:04 managed-node2 sudo[50311]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:04 managed-node2 python3.9[50468]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:04 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:05 managed-node2 python3.9[50624]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod exists httpd3 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:05 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has 
successfully entered the 'dead' state.\nJul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:16:05 managed-node2 sudo[50781]: root : TTY=pts/0 ; PWD=/root ; USER=podman_basic_user ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ulqlnfrznxyujwhsbktkenenjnaaarpn ; XDG_RUNTIME_DIR=/run/user/3001 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933765.2955933-18640-73245982266512/AnsiballZ_command.py'\nJul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session opened for user podman_basic_user(uid=3001) by root(uid=0)\nJul 07 20:16:05 managed-node2 python3.9[50783]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --user list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd1[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:05 managed-node2 sudo[50781]: pam_unix(sudo:session): session closed for user podman_basic_user\nJul 07 20:16:05 managed-node2 python3.9[50935]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd2[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:06 managed-node2 python3.9[51087]: ansible-ansible.legacy.command Invoked with _raw_params=set -euo pipefail\n systemctl --system list-units -a -l --plain | grep -E '^[ ]*podman-kube@.+-httpd3[.]yml[.]service[ ]+loaded[ ]+active '\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:06 managed-node2 python3.9[51239]: ansible-stat Invoked with path=/var/lib/systemd/linger/podman_basic_user follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:07 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:16:09 managed-node2 python3.9[51537]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:10 managed-node2 python3.9[51692]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:16:10 managed-node2 python3.9[51842]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:14 managed-node2 python3.9[51993]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=False service=None split=None\nJul 07 20:16:14 managed-node2 python3.9[52143]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:15 managed-node2 python3.9[52294]: ansible-ansible.legacy.command Invoked with 
_raw_params=getsubids podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:15 managed-node2 python3.9[52444]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g podman_basic_user _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:17 managed-node2 python3.9[52594]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:17 managed-node2 python3.9[52744]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:17 managed-node2 python3.9[52893]: ansible-stat Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:18 managed-node2 python3.9[53042]: ansible-file Invoked with path=/home/podman_basic_user/.config/containers/ansible-kubernetes.d/httpd1.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:19 managed-node2 python3.9[53191]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None\nJul 07 20:16:20 managed-node2 python3.9[53341]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:21 managed-node2 python3.9[53492]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd2.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:21 managed-node2 python3.9[53642]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd2.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:16:22 managed-node2 python3.9[53793]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:22 managed-node2 python3.9[53942]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd2.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:23 managed-node2 python3.9[54091]: ansible-stat Invoked with path=/usr/bin/getsubids 
follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:25 managed-node2 python3.9[54242]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/httpd3.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:25 managed-node2 python3.9[54392]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\\x2dkubernetes.d-httpd3.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None\nJul 07 20:16:26 managed-node2 python3.9[54543]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:26 managed-node2 python3.9[54692]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/httpd3.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:27 managed-node2 python3.9[54841]: ansible-getent Invoked with database=passwd key=podman_basic_user fail_key=True service=None split=None\nJul 07 20:16:28 managed-node2 python3.9[54991]: ansible-stat Invoked with path=/run/user/3001 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:29 managed-node2 python3.9[55140]: ansible-file Invoked with path=/etc/containers/storage.conf state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:30 managed-node2 python3.9[55289]: ansible-file Invoked with path=/tmp/lsr_8xkyz6d8_podman state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:33 managed-node2 python3.9[55487]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d\nJul 07 20:16:34 managed-node2 python3.9[55662]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:34 managed-node2 python3.9[55811]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:36 managed-node2 python3.9[56109]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:37 managed-node2 python3.9[56264]: ansible-getent Invoked with database=passwd key=root 
fail_key=False service=None split=None\nJul 07 20:16:37 managed-node2 python3.9[56414]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:40 managed-node2 python3.9[56565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:41 managed-node2 python3.9[56716]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:41 managed-node2 python3.9[56865]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:16:42 managed-node2 python3.9[56985]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933801.5835004-19965-109711770661066/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:42 managed-node2 python3.9[57134]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:16:42 managed-node2 systemd[1]: Reloading.\nJul 07 20:16:43 managed-node2 systemd-rc-local-generator[57151]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:16:43 managed-node2 python3.9[57317]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:16:43 managed-node2 systemd[1]: Starting quadlet-pod-pod-pod.service...\n\u2591\u2591 Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-pod-pod.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1845.\nJul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay-metacopy\\x2dcheck327374229-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-metacopy\\x2dcheck327374229-merged.mount has successfully entered the 'dead' state.\nJul 07 20:16:43 managed-node2 systemd[1]: Created slice cgroup machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice.\n\u2591\u2591 Subject: A start job for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job 
for unit machine-libpod_pod_e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1916.\nJul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.753251921 -0400 EDT m=+0.075359120 container create 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)\nJul 07 20:16:43 managed-node2 podman[57321]: 2025-07-07 20:16:43.760005549 -0400 EDT m=+0.082112720 pod create e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod)\nJul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57321]: e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79\nJul 07 20:16:43 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8066] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/9)\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered disabled state\nJul 07 20:16:43 managed-node2 kernel: veth0: entered allmulticast mode\nJul 07 20:16:43 managed-node2 kernel: veth0: entered promiscuous mode\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered blocking state\nJul 07 20:16:43 managed-node2 kernel: podman0: port 1(veth0) entered forwarding state\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8206] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/10)\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8221] device (veth0): carrier: link connected\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8226] device (podman0): carrier: link connected\nJul 07 20:16:43 managed-node2 systemd-udevd[57347]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:16:43 managed-node2 systemd-udevd[57348]: Network interface NamePolicy= disabled on kernel command line.\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8651] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8658] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8668] device (podman0): Activation: starting connection 'podman0' (0dc63386-fc14-4ac2-8cee-25b24d1739b5)\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8670] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8673] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8676] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8679] 
device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 systemd[1]: Starting Network Manager Script Dispatcher Service...\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1921.\nJul 07 20:16:43 managed-node2 systemd[1]: Started Network Manager Script Dispatcher Service.\n\u2591\u2591 Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit NetworkManager-dispatcher.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1921.\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8930] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8932] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')\nJul 07 20:16:43 managed-node2 NetworkManager[644]: [1751933803.8938] device (podman0): Activation: successful, device activated.\nJul 07 20:16:43 managed-node2 systemd[1]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit libpod-8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c.scope has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1987.\nJul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.975146229 -0400 EDT m=+0.200627141 container init 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)\nJul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.981266459 -0400 EDT m=+0.206747252 container start 8854ba6a76c45d1f49cbb40fb6b5ea32b169bc30ffa29374b62851695b180a1c (image=, name=quadlet-pod-infra, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-pod-pod.service)\nJul 07 20:16:43 managed-node2 podman[57329]: 2025-07-07 20:16:43.987430031 -0400 EDT m=+0.212910758 pod start e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79 (image=, name=quadlet-pod)\nJul 07 20:16:43 managed-node2 quadlet-pod-pod-pod[57329]: quadlet-pod\nJul 07 20:16:43 managed-node2 systemd[1]: Started quadlet-pod-pod-pod.service.\n\u2591\u2591 Subject: A start job for unit quadlet-pod-pod-pod.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-pod-pod.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1845.\nJul 07 20:16:44 managed-node2 python3.9[57565]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True 
checksum_algorithm=sha1\nJul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:46 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:46 managed-node2 podman[57748]: 2025-07-07 20:16:46.651148299 -0400 EDT m=+0.387653928 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:16:47 managed-node2 python3.9[57912]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:47 managed-node2 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state.\nJul 07 20:16:47 managed-node2 python3.9[58061]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:16:47 managed-node2 python3.9[58181]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933807.2449324-20069-121688553430320/.source.container dest=/etc/containers/systemd/quadlet-pod-container.container owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:48 managed-node2 python3.9[58330]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:16:48 managed-node2 systemd[1]: Reloading.\nJul 07 20:16:48 managed-node2 systemd-rc-local-generator[58347]: /etc/rc.d/rc.local is not marked executable, skipping.\nJul 07 20:16:48 managed-node2 python3.9[58513]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:16:49 managed-node2 systemd[1]: Starting quadlet-pod-container.service...\n\u2591\u2591 Subject: A start job for unit quadlet-pod-container.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-container.service has begun 
execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1992.\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.074860568 -0400 EDT m=+0.046591683 container create b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z)\nJul 07 20:16:49 managed-node2 systemd[1]: var-lib-containers-storage-overlay-volatile\\x2dcheck976746358-merged.mount: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit var-lib-containers-storage-overlay-volatile\\x2dcheck976746358-merged.mount has successfully entered the 'dead' state.\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.117361038 -0400 EDT m=+0.089092248 container init b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_by=test/system/build-testimage, io.buildah.version=1.21.0, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service)\nJul 07 20:16:49 managed-node2 systemd[1]: Started quadlet-pod-container.service.\n\u2591\u2591 Subject: A start job for unit quadlet-pod-container.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit quadlet-pod-container.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1992.\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.122201448 -0400 EDT m=+0.093932749 container start b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e (image=quay.io/libpod/testimage:20210610, name=quadlet-pod-container, pod_id=e3b447f755bf797dc1c1eae5796a315cce75f2e44133fcafee8c0e1d17df3a79, created_at=2021-06-10T18:55:36Z, PODMAN_SYSTEMD_UNIT=quadlet-pod-container.service, created_by=test/system/build-testimage, io.buildah.version=1.21.0)\nJul 07 20:16:49 managed-node2 quadlet-pod-container[58517]: b345bf186b4d8ce4960e19da7b04d5b12bd2095620bf2b36a22c1a624a5edc3e\nJul 07 20:16:49 managed-node2 podman[58517]: 2025-07-07 20:16:49.05249592 -0400 EDT m=+0.024227291 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610\nJul 07 20:16:49 managed-node2 python3.9[58679]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:49 managed-node2 python3.9[58829]: ansible-ansible.legacy.command Invoked with _raw_params=cat /etc/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:50 managed-node2 python3.9[58979]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}}\n {{end}}' _uses_shell=False 
expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:50 managed-node2 python3.9[59137]: ansible-user Invoked with name=user_quadlet_pod uid=2223 state=present non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on managed-node2 update_password=always group=None groups=None comment=None home=None shell=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None\nJul 07 20:16:50 managed-node2 useradd[59139]: new group: name=user_quadlet_pod, GID=2223\nJul 07 20:16:50 managed-node2 useradd[59139]: new user: name=user_quadlet_pod, UID=2223, GID=2223, home=/home/user_quadlet_pod, shell=/bin/bash, from=/dev/pts/0\nJul 07 20:16:50 managed-node2 rsyslogd[812]: imjournal: journal files changed, reloading... [v8.2412.0-2.el9 try https://www.rsyslog.com/e/0 ]\nJul 07 20:16:52 managed-node2 python3.9[59444]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:53 managed-node2 python3.9[59599]: ansible-getent Invoked with database=passwd key=user_quadlet_pod fail_key=False service=None split=None\nJul 07 20:16:53 managed-node2 python3.9[59749]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:53 managed-node2 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.\n\u2591\u2591 Subject: Unit succeeded\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.\nJul 07 20:16:54 managed-node2 python3.9[59900]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:54 managed-node2 python3.9[60050]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:56 managed-node2 python3.9[60200]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:16:57 managed-node2 python3.9[60351]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:57 managed-node2 python3.9[60501]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None 
chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:16:58 managed-node2 python3.9[60651]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None\nJul 07 20:16:58 managed-node2 systemd[1]: Created slice User Slice of UID 2223.\n\u2591\u2591 Subject: A start job for unit user-2223.slice has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user-2223.slice has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2130.\nJul 07 20:16:58 managed-node2 systemd[1]: Starting User Runtime Directory /run/user/2223...\n\u2591\u2591 Subject: A start job for unit user-runtime-dir@2223.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user-runtime-dir@2223.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2065.\nJul 07 20:16:58 managed-node2 systemd[1]: Finished User Runtime Directory /run/user/2223.\n\u2591\u2591 Subject: A start job for unit user-runtime-dir@2223.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user-runtime-dir@2223.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2065.\nJul 07 20:16:58 managed-node2 systemd[1]: Starting User Manager for UID 2223...\n\u2591\u2591 Subject: A start job for unit user@2223.service has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user@2223.service has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2064.\nJul 07 20:16:58 managed-node2 systemd[60658]: pam_unix(systemd-user:session): session opened for user user_quadlet_pod(uid=2223) by user_quadlet_pod(uid=0)\nJul 07 20:16:58 managed-node2 systemd[60658]: Queued start job for default target Main User Target.\nJul 07 20:16:58 managed-node2 systemd[60658]: Created slice User Application Slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 4.\nJul 07 20:16:58 managed-node2 systemd[60658]: Started Mark boot as successful after the user session has run 2 minutes.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 8.\nJul 07 20:16:58 managed-node2 systemd[60658]: Started Daily Cleanup of User's Temporary Directories.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job 
identifier is 9.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Paths.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 12.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Timers.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 7.\nJul 07 20:16:58 managed-node2 systemd[60658]: Starting D-Bus User Message Bus Socket...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 11.\nJul 07 20:16:58 managed-node2 systemd[60658]: Starting Create User's Volatile Files and Directories...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 3.\nJul 07 20:16:58 managed-node2 systemd[60658]: Listening on D-Bus User Message Bus Socket.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 11.\nJul 07 20:16:58 managed-node2 systemd[60658]: Finished Create User's Volatile Files and Directories.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 3.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Sockets.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 10.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Basic System.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2.\nJul 07 20:16:58 managed-node2 systemd[60658]: Reached target Main User Target.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 1.\nJul 07 20:16:58 managed-node2 systemd[60658]: Startup finished in 65ms.\n\u2591\u2591 
Subject: User manager start-up is now complete\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The user manager instance for user 2223 has been started. All services queued\n\u2591\u2591 for starting have been started. Note that other services might still be starting\n\u2591\u2591 up or be started at any later time.\n\u2591\u2591 \n\u2591\u2591 Startup of the manager took 65603 microseconds.\nJul 07 20:16:58 managed-node2 systemd[1]: Started User Manager for UID 2223.\n\u2591\u2591 Subject: A start job for unit user@2223.service has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit user@2223.service has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 2064.\nJul 07 20:16:59 managed-node2 python3.9[60817]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:16:59 managed-node2 python3.9[60966]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:17:00 managed-node2 python3.9[61086]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933819.4453251-20404-152886448732131/.source.pod dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:00 managed-node2 sudo[61235]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qycimnmrylvnpkxuzbdovgpddpoemvav ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.1430469-20434-39126838474860/AnsiballZ_systemd.py'\nJul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:17:00 managed-node2 python3.9[61237]: ansible-systemd [WARNING] Module remote_tmp /home/user_quadlet_pod/.ansible/tmp did not exist and was created with a mode of 0700, this may cause issues when running as another user. 
To avoid this, create the remote_tmp dir with the correct permissions manually\nJul 07 20:17:00 managed-node2 systemd[60658]: Reloading.\nJul 07 20:17:00 managed-node2 sudo[61235]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:00 managed-node2 sudo[61397]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nnnqmwerpaczbueaimzsjafnuajzqvhs ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933820.7074661-20450-27497593035399/AnsiballZ_systemd.py'\nJul 07 20:17:00 managed-node2 sudo[61397]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:01 managed-node2 python3.9[61399]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:17:01 managed-node2 systemd[60658]: Starting Wait for system level network-online.target as user....\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 25.\nJul 07 20:17:01 managed-node2 sh[61403]: active\nJul 07 20:17:01 managed-node2 systemd[60658]: Finished Wait for system level network-online.target as user..\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 25.\nJul 07 20:17:01 managed-node2 systemd[60658]: Starting quadlet-pod-pod-pod.service...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 13.\nJul 07 20:17:01 managed-node2 systemd[60658]: Starting D-Bus User Message Bus...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 26.\nJul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored\nJul 07 20:17:01 managed-node2 systemd[60658]: Started D-Bus User Message Bus.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 26.\nJul 07 20:17:01 managed-node2 dbus-broker-launch[61428]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored\nJul 07 20:17:01 managed-node2 dbus-broker-lau[61428]: Ready\nJul 07 20:17:01 managed-node2 systemd[60658]: Created slice Slice /user.\n\u2591\u2591 Subject: A start job for unit UNIT has 
finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 32.\nJul 07 20:17:01 managed-node2 systemd[60658]: Created slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 31.\nJul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61411]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658\nJul 07 20:17:01 managed-node2 systemd[60658]: podman-pause-d252ab55.scope: unit configures an IP firewall, but not running as root.\nJul 07 20:17:01 managed-node2 systemd[60658]: (This warning is only shown for the first unit using IP firewalling.)\nJul 07 20:17:01 managed-node2 systemd[60658]: Started podman-pause-d252ab55.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 35.\nJul 07 20:17:01 managed-node2 systemd[60658]: Started libcrun container.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 39.\nJul 07 20:17:01 managed-node2 quadlet-pod-pod-pod[61431]: quadlet-pod\nJul 07 20:17:01 managed-node2 systemd[60658]: Started quadlet-pod-pod-pod.service.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 13.\nJul 07 20:17:01 managed-node2 sudo[61397]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:02 managed-node2 python3.9[61604]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:02 managed-node2 python3.9[61755]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:02 managed-node2 python3.9[61905]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:04 managed-node2 python3.9[62055]: ansible-ansible.legacy.command Invoked with creates=/var/lib/systemd/linger/user_quadlet_pod _raw_params=loginctl enable-linger user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None removes=None stdin=None\nJul 07 
20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:04 managed-node2 sudo[62204]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-clqautntenvhsnblukvtrjsozolrrutk ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933824.2704933-20565-153115325770627/AnsiballZ_podman_image.py'\nJul 07 20:17:04 managed-node2 sudo[62204]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62207.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 44.\nJul 07 20:17:04 managed-node2 systemd[60658]: Started podman-62215.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 48.\nJul 07 20:17:05 managed-node2 systemd[60658]: Started podman-62241.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 52.\nJul 07 20:17:05 managed-node2 sudo[62204]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:05 managed-node2 python3.9[62397]: ansible-file Invoked with path=/home/user_quadlet_pod/.config/containers/systemd state=directory owner=user_quadlet_pod group=2223 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:17:06 managed-node2 python3.9[62546]: ansible-ansible.legacy.stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True\nJul 07 20:17:06 managed-node2 python3.9[62666]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1751933826.0010831-20615-171147992892008/.source.container dest=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container owner=user_quadlet_pod group=2223 mode=0644 follow=False _original_basename=systemd.j2 checksum=f0b5c8159fc3c65bf9310a371751609e4c1ba4c3 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None\nJul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:06 managed-node2 sudo[62815]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-wvdbdvervsgjztpuoyolguwsbeqeprfj ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 
/var/tmp/ansible-tmp-1751933826.7366147-20636-253031789963643/AnsiballZ_systemd.py'\nJul 07 20:17:06 managed-node2 sudo[62815]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:07 managed-node2 python3.9[62817]: ansible-systemd Invoked with daemon_reload=True scope=user daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None\nJul 07 20:17:07 managed-node2 systemd[60658]: Reloading.\nJul 07 20:17:07 managed-node2 sudo[62815]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:07 managed-node2 sudo[62977]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-uowxubkhbahtejstbxtpvjntqjsyenah ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933827.3300688-20652-5834244785399/AnsiballZ_systemd.py'\nJul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:07 managed-node2 python3.9[62979]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None\nJul 07 20:17:07 managed-node2 systemd[60658]: Starting quadlet-pod-container.service...\n\u2591\u2591 Subject: A start job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 56.\nJul 07 20:17:07 managed-node2 systemd[60658]: Started quadlet-pod-container.service.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 56.\nJul 07 20:17:07 managed-node2 quadlet-pod-container[62982]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6\nJul 07 20:17:07 managed-node2 sudo[62977]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:08 managed-node2 python3.9[63145]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:08 managed-node2 python3.9[63295]: ansible-ansible.legacy.command Invoked with _raw_params=cat /home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-pod.pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:09 managed-node2 sudo[63445]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-smtrjtflbvxbkabzbxqvalutkdqpmdkb ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933828.9926739-20712-141410485382073/AnsiballZ_command.py'\nJul 07 20:17:09 managed-node2 sudo[63445]: 
pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:09 managed-node2 python3.9[63447]: ansible-ansible.legacy.command Invoked with _raw_params=podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}}\n {{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:09 managed-node2 systemd[60658]: Started podman-63448.scope.\n\u2591\u2591 Subject: A start job for unit UNIT has finished successfully\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A start job for unit UNIT has finished successfully.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 70.\nJul 07 20:17:09 managed-node2 sudo[63445]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:09 managed-node2 python3.9[63604]: ansible-stat Invoked with path=/var/lib/systemd/linger/user_quadlet_pod follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:11 managed-node2 python3.9[63904]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:12 managed-node2 python3.9[64059]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:13 managed-node2 python3.9[64210]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:13 managed-node2 python3.9[64360]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:15 managed-node2 python3.9[64510]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:16 managed-node2 python3.9[64661]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:16 managed-node2 python3.9[64811]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:17 managed-node2 python3.9[64961]: ansible-stat Invoked with path=/run/user/2223 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:18 managed-node2 sudo[65112]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-mdrwdexxoegkvbfvoaccgnpcnrcnmlbz ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 
/var/tmp/ansible-tmp-1751933838.0379803-20995-129290334053056/AnsiballZ_systemd.py'\nJul 07 20:17:18 managed-node2 sudo[65112]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:18 managed-node2 python3.9[65114]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None\nJul 07 20:17:18 managed-node2 systemd[60658]: Reloading.\nJul 07 20:17:18 managed-node2 systemd[60658]: Stopping quadlet-pod-container.service...\n\u2591\u2591 Subject: A stop job for unit UNIT has begun execution\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has begun execution.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 74.\nJul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: time=\"2025-07-07T20:17:28-04:00\" level=warning msg=\"StopSignal SIGTERM failed to stop container quadlet-pod-container in 10 seconds, resorting to SIGKILL\"\nJul 07 20:17:28 managed-node2 quadlet-pod-container[65129]: b13865d1720ba2247935f8a907f43f29957f51e6b9e3476a39eb71da7ad9ebb6\nJul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Main process exited, code=exited, status=137/n/a\n\u2591\u2591 Subject: Unit process exited\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 An ExecStart= process belonging to unit UNIT has exited.\n\u2591\u2591 \n\u2591\u2591 The process' exit code is 'exited' and its exit status is 137.\nJul 07 20:17:28 managed-node2 systemd[60658]: Removed slice cgroup user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 75 and the job result is done.\nJul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory\nJul 07 20:17:28 managed-node2 systemd[60658]: quadlet-pod-container.service: Failed with result 'exit-code'.\n\u2591\u2591 Subject: Unit failed\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 The unit UNIT has entered the 'failed' state with result 'exit-code'.\nJul 07 20:17:28 managed-node2 systemd[60658]: Stopped quadlet-pod-container.service.\n\u2591\u2591 Subject: A stop job for unit UNIT has finished\n\u2591\u2591 Defined-By: systemd\n\u2591\u2591 Support: https://access.redhat.com/support\n\u2591\u2591 \n\u2591\u2591 A stop job for unit UNIT has finished.\n\u2591\u2591 \n\u2591\u2591 The job identifier is 74 and the job result is done.\nJul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory\nJul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65162]: quadlet-pod\nJul 07 20:17:28 managed-node2 
sudo[65112]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:28 managed-node2 systemd[60658]: user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: Failed to open /run/user/2223/systemd/transient/user-libpod_pod_abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658.slice: No such file or directory\nJul 07 20:17:28 managed-node2 quadlet-pod-pod-pod[65176]: abd0a651171a4d4d842b67267db038335d998d680a41bb9659d159f935202658\nJul 07 20:17:29 managed-node2 python3.9[65334]: ansible-stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:30 managed-node2 python3.9[65634]: ansible-ansible.legacy.command Invoked with _raw_params=set -x\n set -o pipefail\n exec 1>&2\n #podman volume rm --all\n #podman network prune -f\n podman volume ls\n podman network ls\n podman secret ls\n podman container ls\n podman pod ls\n podman images\n systemctl list-units | grep quadlet\n systemctl list-unit-files | grep quadlet\n ls -alrtF /etc/containers/systemd\n /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log\n _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:30 managed-node2 python3.9[65839]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:31 managed-node2 python3.9[65989]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:33 managed-node2 python3.9[66288]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:34 managed-node2 python3.9[66443]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:34 managed-node2 python3.9[66594]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:34 managed-node2 python3.9[66744]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:36 managed-node2 python3.9[66894]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:37 managed-node2 python3.9[67045]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:37 
managed-node2 python3.9[67195]: ansible-ansible.legacy.command Invoked with _raw_params=getsubids -g user_quadlet_pod _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None\nJul 07 20:17:38 managed-node2 python3.9[67345]: ansible-stat Invoked with path=/run/user/2223 follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:38 managed-node2 sudo[67496]: pam_unix(sudo:account): password for user root will expire in 0 days\nJul 07 20:17:38 managed-node2 sudo[67496]: root : TTY=pts/0 ; PWD=/root ; USER=user_quadlet_pod ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-rlcbsoretjgzvpwkphyhytxtddgruzuo ; XDG_RUNTIME_DIR=/run/user/2223 /usr/bin/python3.9 /var/tmp/ansible-tmp-1751933858.505194-21528-62534825907089/AnsiballZ_systemd.py'\nJul 07 20:17:38 managed-node2 sudo[67496]: pam_unix(sudo:session): session opened for user user_quadlet_pod(uid=2223) by root(uid=0)\nJul 07 20:17:38 managed-node2 python3.9[67498]: ansible-systemd Invoked with name=quadlet-pod-container.service scope=user state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None\nJul 07 20:17:38 managed-node2 systemd[60658]: Reloading.\nJul 07 20:17:38 managed-node2 sudo[67496]: pam_unix(sudo:session): session closed for user user_quadlet_pod\nJul 07 20:17:39 managed-node2 python3.9[67660]: ansible-stat Invoked with path=/home/user_quadlet_pod/.config/containers/systemd/quadlet-pod-container.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1\nJul 07 20:17:40 managed-node2 python3.9[67960]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None", "task_name": "Dump journal", "task_path": "/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:194" } ]
SYSTEM ROLES ERRORS END v1

TASKS RECAP ********************************************************************
Monday 07 July 2025 20:17:40 -0400 (0:00:00.427) 0:01:07.998 ***********
===============================================================================
fedora.linux_system_roles.podman : Stop and disable service ------------ 10.92s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/cleanup_quadlet_spec.yml:12
fedora.linux_system_roles.podman : Ensure container images are present --- 1.19s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18
Gathering Facts --------------------------------------------------------- 1.12s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:9
fedora.linux_system_roles.podman : Ensure container images are present --- 1.08s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18
fedora.linux_system_roles.podman : Gather the package facts ------------- 1.03s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Reload systemctl --------------------- 0.91s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:87
Debug3 ------------------------------------------------------------------ 0.90s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:127
fedora.linux_system_roles.podman : Start service ------------------------ 0.87s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:115
fedora.linux_system_roles.podman : Start service ------------------------ 0.86s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:115
fedora.linux_system_roles.podman : Gather the package facts ------------- 0.81s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Gather the package facts ------------- 0.79s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Gather the package facts ------------- 0.79s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Ensure quadlet file is present ------- 0.76s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75
Check files ------------------------------------------------------------- 0.74s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:70
fedora.linux_system_roles.podman : Ensure quadlet file is present ------- 0.74s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75
Check files ------------------------------------------------------------- 0.73s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:40
fedora.linux_system_roles.podman : Start service ------------------------ 0.70s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:115
fedora.linux_system_roles.podman : Ensure quadlet file is present ------- 0.69s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75
fedora.linux_system_roles.podman : Ensure quadlet file is present ------- 0.69s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:75
fedora.linux_system_roles.podman : Reload systemctl --------------------- 0.69s
/tmp/collections-AV4/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:87
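For reference, the rootless quadlet deployment exercised above can be reproduced outside the test suite with a playbook along these lines. This is an illustrative sketch, not the test playbook: the variable names assume the role's podman_run_as_user and podman_quadlet_specs interface, and the image and unit file bodies are placeholders rather than the test's own.

# Illustrative sketch; variable names assume the role's podman_quadlet_specs
# interface, the image and unit contents are placeholders.
- name: Deploy a rootless quadlet pod with one member container
  hosts: all
  vars:
    podman_run_as_user: user_quadlet_pod
    podman_quadlet_specs:
      # Written to ~/.config/containers/systemd/quadlet-pod-pod.pod; quadlet
      # generates the user unit quadlet-pod-pod-pod.service seen in the journal.
      - name: quadlet-pod-pod
        type: pod
        file_content: |
          [Pod]
          PodName=quadlet-pod
      # Written to quadlet-pod-container.container; quadlet generates
      # quadlet-pod-container.service and joins it to the pod above.
      - name: quadlet-pod-container
        type: container
        file_content: |
          [Container]
          # Placeholder image, not the one the test pulls
          Image=quay.io/libpod/testimage:20210610
          Pod=quadlet-pod-pod.pod
  roles:
    - fedora.linux_system_roles.podman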
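The podman pod inspect call logged at 20:17:09 is the check that the container actually joined the pod. Expressed as an Ansible task it looks roughly like the sketch below; the become and environment settings mirror how the test reaches the rootless user (user_quadlet_pod, uid 2223), while the task name and register variable are illustrative.

- name: Check which containers are attached to the quadlet-pod pod
  become: true
  become_user: user_quadlet_pod
  environment:
    # Runtime dir of uid 2223, as used throughout the journal above
    XDG_RUNTIME_DIR: /run/user/2223
  # !unsafe keeps Jinja2 from expanding the Go-template braces
  command: !unsafe >-
    podman pod inspect quadlet-pod --format '{{range .Containers}}{{.Name}} {{end}}'
  register: __pod_containers
  changed_when: false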
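Cleanup, the 10.92s "Stop and disable service" entry in the recap, is the same role invocation with the specs removed; note the journal's SIGTERM-to-SIGKILL fallback, where a container that ignores SIGTERM holds this step for the 10-second stop timeout and the service exits with status 137. A hedged sketch, assuming the spec-level state field supports absent:

- name: Tear the quadlet pod back down
  hosts: all
  vars:
    podman_run_as_user: user_quadlet_pod
    podman_quadlet_specs:
      # Remove the member container first so the pod is empty before removal
      - name: quadlet-pod-container
        type: container
        state: absent
      - name: quadlet-pod-pod
        type: pod
        state: absent
  roles:
    - fedora.linux_system_roles.podman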